From 5ce64bd76170e84023e8be2e41754367cf4b412c Mon Sep 17 00:00:00 2001 From: "tao.gan" Date: Wed, 14 Jan 2026 16:18:10 +0800 Subject: [PATCH 01/76] [host]: filter out disks of type loop and rom Resolves: ZSV-10987 Change-Id: I7277616f7378676d696772796b766676766c706b --- .../java/org/zstack/header/host/BlockDevicesParser.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/header/src/main/java/org/zstack/header/host/BlockDevicesParser.java b/header/src/main/java/org/zstack/header/host/BlockDevicesParser.java index 477399d5a37..6a6ca080f50 100644 --- a/header/src/main/java/org/zstack/header/host/BlockDevicesParser.java +++ b/header/src/main/java/org/zstack/header/host/BlockDevicesParser.java @@ -136,9 +136,8 @@ public void setModel(String model) { " ]\n" + "}\n" + "===\r\n" + - "/dev/sr0:unknown\r\n\r\n" + "/dev/vda:\r\n" + - "/dev/vdb:loop\r\n\r\n" + + "/dev/vdb:msdos\r\n" + "/dev/vdc:unknown\r\n"; public static List parse(String blockDevices) { @@ -157,7 +156,7 @@ public static List parse(String blockDevices) { } blockDevicePartitionTable.put(blockDeviceAndPartitionTable.get(0), blockDeviceAndPartitionTable.get(1)); }); - allBlockDevices.forEach(blockDevice -> blockDevice.setPartitionTable(blockDevicePartitionTable.get(blockDevice.getName()))); + allBlockDevices.forEach(blockDevice -> blockDevice.setPartitionTable(blockDevicePartitionTable.getOrDefault(blockDevice.getName(), null))); return allBlockDevices; } @@ -173,7 +172,7 @@ public static List parse(List blockDevices) { public static String getBlockDevicesCommand() { String blockDevicesCommand = "lsblk -p -b -o NAME,TYPE,SIZE,PHY-SEC,LOG-SEC,MOUNTPOINT -J"; - String partitionTableInfoCommand = "for disk in $(lsblk -d -p -n -o NAME); do echo -n \"$disk:\"; " + + String partitionTableInfoCommand = "for disk in $(lsblk -d -p -n -o NAME,TYPE | awk '$2!~/loop|rom/ {print $1}'); do echo -n \"$disk:\"; " + "parted -s $disk print 2>/dev/null | awk '/Partition Table/ {print $3} END{print \"\"}'; 
done"; return String.format("%s ; echo === ; %s", blockDevicesCommand, partitionTableInfoCommand); } From fa2b6c021a97b4b254ff9d4c212073e7cc82e937 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Wed, 21 Jan 2026 19:05:16 +0800 Subject: [PATCH 02/76] [account-import]: add progress for deleting ldap server This patch is for zsv_4.10.28 Resolves: ZSV-6434 Related: ZSV-9610 Change-Id: I7763786570626f646a636b767577616a6e726172 --- .../identity/imports/source/AbstractAccountSourceBase.java | 1 + 1 file changed, 1 insertion(+) diff --git a/plugin/account-import/src/main/java/org/zstack/identity/imports/source/AbstractAccountSourceBase.java b/plugin/account-import/src/main/java/org/zstack/identity/imports/source/AbstractAccountSourceBase.java index 566a8f28888..eb8f4d8796c 100644 --- a/plugin/account-import/src/main/java/org/zstack/identity/imports/source/AbstractAccountSourceBase.java +++ b/plugin/account-import/src/main/java/org/zstack/identity/imports/source/AbstractAccountSourceBase.java @@ -842,6 +842,7 @@ public boolean skip(Map data) { @Override public void run(FlowTrigger trigger, Map data) { new While<>(splitAccountLists()) + .enableProgressReport("cleaning-stale-accounts-for-deleting-account-source") .each(this::cleanStaleAccounts) .run(new WhileDoneCompletion(trigger) { @Override From 1d840fd0e4753f2d6f1b8a0d0f98c3c230d0a1b4 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 22 Jan 2026 16:04:48 +0800 Subject: [PATCH 03/76] [conf]: update i18n json for mevoco module This patch is for zsv_4.10.28 Resolves: ZSV-11206 Change-Id: I77736869617878636b64756c656b7a666f777966 --- conf/i18n/messages_en_US.properties | 176 ++++++++++++++-------------- conf/i18n/messages_zh_CN.properties | 176 ++++++++++++++-------------- conf/i18n_json/i18n_cdp.json | 88 +++++++------- conf/i18n_json/i18n_compute.json | 10 ++ conf/i18n_json/i18n_core.json | 18 +-- conf/i18n_json/i18n_crypto.json | 56 ++++----- conf/i18n_json/i18n_mevoco.json | 58 ++++++--- 
conf/i18n_json/i18n_storage.json | 37 ++++-- 8 files changed, 343 insertions(+), 276 deletions(-) diff --git a/conf/i18n/messages_en_US.properties b/conf/i18n/messages_en_US.properties index ae2f001ab4b..5c78fa135d1 100755 --- a/conf/i18n/messages_en_US.properties +++ b/conf/i18n/messages_en_US.properties @@ -347,12 +347,10 @@ Unexpected\ task\ type[uuid\:\ %s,\ type\:\ %s] = Unexpected task type[uuid: {0} Max\ capacity\ not\ found\ for\ cdp\ task[uuid\:\ %s],\ please\ update\ it. = Max capacity not found for cdp task[uuid: {0}], please update it. Max\ latency\ not\ found\ for\ cdp\ task[uuid\:\ %s],\ please\ update\ it. = Max latency not found for cdp task[uuid: {0}], please update it. VM[uuid\:\ %s]\ already\ deleted = VM[uuid: {0}] already deleted -Unexpected\ VM\ state\:\ %s = Unexpected VM state: {0} Backup\ storage\ not\ found[uuid\:\ %s] = Backup storage not found[uuid: {0}] Backup\ storage[uuid\:\ %s]\ is\ disabled = Backup storage[uuid: {0}] is disabled Backup\ storage[uuid\:\ %s]\ is\ not\ connected = Backup storage[uuid: {0}] is not connected The\ vm[uuid\:\ %s]\ has\ already\ created\ a\ backup\ job,\ cannot\ enable\ the\ cdp\ task\ at\ the\ same\ time. = The vm[uuid: {0}] has already created a backup job, cannot enable the cdp task at the same time. -unexpected\ task\ type\:\ %s = unexpected task type: {0} '%s'(%d)\ should\ be\ larger\ than\ '%s'(%d) = ''{0}''({1}) should be larger than ''{2}''({3}) mandatory\ args\ missing\:\ %s = mandatory args missing: {0} parameter\ RetentionTimePerDay\ and\ DailyRpSinceDay\ cannot\ be\ equal = parameter RetentionTimePerDay and DailyRpSinceDay cannot be equal @@ -371,23 +369,24 @@ CDP\ task[uuid\:\ %s]\ has\ no\ VM\ attached = CDP task[uuid: {0}] has no VM att task[uuid\:%s]\ have\ been\ deleted = task[uuid:{0}] have been deleted Cdp\ task\ is\ merging\ data,\ cannot\ pickup. = Cdp task is merging data, cannot pickup. CDP\ task[uuid\:%s]\ exceeded\ storage\ usage\:\ maximum\ %d,\ used\ %d. 
= CDP task[uuid:{0}] exceeded storage usage: maximum {1}, used {2}. +Unexpected\ VM\ state\:\ %s = Unexpected VM state: {0} No\ CDP\ task\ found\ for\ VM\:\ %s = No CDP task found for VM: {0} No\ CDP\ backup\ storage\ found\ for\ VM\:\ %s = No CDP backup storage found for VM: {0} No\ CdpBackupFactory\ of\ type[%s]\ found = No CdpBackupFactory of type[{0}] found CDP\ task[uuid\:\ %s]\ not\ found = CDP task[uuid: {0}] not found +unexpected\ task\ type\:\ %s = unexpected task type: {0} The\ VM[%s]\ for\ volume[%s]\ is\ running\ CDP,\ cannot\ resize\ now. = The VM[{0}] for volume[{1}] is running CDP, cannot resize now. No\ VM\ found\ for\ CDP\ task[uuid\:\ %s] = No VM found for CDP task[uuid: {0}] BackupStorage[uuid\:\ %s]\ already\ been\ deleted = BackupStorage[uuid: {0}] already been deleted -waiting\ host[uuid\:%s]\ and\ backupStorage[uuid\:%s]\ to\ be\ Connected... = waiting host[uuid:{0}] and backupStorage[uuid:{1}] to be Connected... failed\ to\ find\ cdp\ task[uuid\:%s]\ = failed to find cdp task[uuid:{0}] VM\ CDP\ task[uuid\:\ %s]\ not\ found = VM CDP task[uuid: {0}] not found VM\ not\ found\ for\ CDP\ task[uuid\:\ %s] = VM not found for CDP task[uuid: {0}] +no\ volume\ records\ found\ from\ VM\ backup = no volume records found from VM backup multiple\ root\ volumes\ found\ from\ CDP\ backup\ %s\:%d = multiple root volumes found from CDP backup {0}:{1} cannot\ find\ root\ volume\ from\ CDP\ backup\ %s\:%d = cannot find root volume from CDP backup {0}:{1} root\ volume\ not\ found\ from\ CDP\ backup\ %s\:%d = root volume not found from CDP backup {0}:{1} recoverVm\:\ host[uuid\:\ %s]\ not\ found\ for\ VM[uuid\:\ %s] = recoverVm: host[uuid: {0}] not found for VM[uuid: {1}] multiple\ root\ volumes\ found\:\ %s = multiple root volumes found: {0} -no\ volume\ records\ found\ from\ VM\ backup = no volume records found from VM backup no\ root\ volume\ found\ from\ VM\ backup = no root volume found from VM backup volume\ %s\ contains\ in\ backup\ but\ detached\ from\ 
VM[uuid\:\ %s]\:\ you\ need\ to\ either\ attach\ it\ back\ or\ delete\ it = volume {0} contains in backup but detached from VM[uuid: {1}]: you need to either attach it back or delete it unexpected\ volume[uuid\:\ %s]\ size\:\ %d = unexpected volume[uuid: {0}] size: {1} @@ -397,6 +396,7 @@ Available\ License\ not\ found,\ please\ apply\ addon\ license\ for\ product\ CD kvmagent\ restarted = kvmagent restarted kvmagent\ no\ response\ %d\ times = kvmagent no response {0} times recoverVm\:\ host\ uuid\ is\ not\ provided\ and\ original\ host\ is\ not\ found\ for\ VM[uuid\:\ %s] = recoverVm: host uuid is not provided and original host is not found for VM[uuid: {0}] +waiting\ host[uuid\:%s]\ and\ backupStorage[uuid\:%s]\ to\ be\ Connected... = waiting host[uuid:{0}] and backupStorage[uuid:{1}] to be Connected... No\ CDP\ task\ found\ for\ VM[uuid\:\ %s] = No CDP task found for VM[uuid: {0}] CDP\ task\ for\ VM[uuid\:\ %s]\ is\ not\ found\ on\ BS[uuid\:\ %s] = CDP task for VM[uuid: {0}] is not found on BS[uuid: {1}] @@ -706,6 +706,7 @@ VmInstanceStartExtensionPoint[%s]\ refuses\ to\ start\ vm[uuid\:%s] = VmInstance Not\ allowed\ same\ mac\ [%s] = Not allowed same mac [{0}] Can't\ add\ same\ uuid\ in\ the\ l3Network,uuid\:\ %s = Can''t add same uuid in the l3Network,uuid: {0} host[uuid\:%s]\ is\ specified\ but\ it\ is\ not\ in\ cluster[uuid\:%s],\ can\ not\ create\ vm\ from\ it = host[uuid:{0}] is specified but it is not in cluster[uuid:{1}], can not create vm from it +host[uuid\:%s]\ is\ specified\ but\ it\ is\ not\ in\ zone[uuid\:%s],\ can\ not\ create\ vm\ from\ it = host[uuid:{0}] is specified but it is not in zone[uuid:{1}], can not create vm from it host[uuid\:%s]\ is\ specified\ but\ it's\ Disabled,\ can\ not\ create\ vm\ from\ it = host[uuid:{0}] is specified but it''s Disabled, can not create vm from it host[uuid\:%s]\ is\ specified\ but\ its\ connection\ status\ is\ %s,\ can\ not\ create\ vm\ from\ it = host[uuid:{0}] is specified but its connection status is {1}, 
can not create vm from it cluster[uuid\:%s]\ is\ specified\ but\ it's\ not\ in\ zone[uuid\:%s],\ can\ not\ create\ vm\ from\ it = cluster[uuid:{0}] is specified but it''s not in zone[uuid:{1}], can not create vm from it @@ -817,10 +818,10 @@ failed\ to\ reconnect\ console\ proxy = failed to reconnect console proxy # In Module: core no\ executor\ found\ for\ resourceUuid[%s] = no executor found for resourceUuid[{0}] Multiple\ errors = Multiple errors -fail\ to\ create\ new\ File[%s] = fail to create new File[{0}] failed\ to\ run\ ansible\:\ failed\ to\ find\ target\ host = failed to run ansible: failed to find target host failed\ to\ run\ ansible = failed to run ansible User\ name\ or\ password\ or\ port\ number\ may\ be\ problematic = User name or password or port number may be problematic +fail\ to\ create\ new\ File[%s] = fail to create new File[{0}] cannot\ check\ md5sum\ of\ files\ in\ the\ folder[%s] = cannot check md5sum of files in the folder[{0}] cannot\ check\ md5sum\ of\ files\ in\ the\ folder[%s]\ on\ the\ host[ip\:%s] = cannot check md5sum of files in the folder[{0}] on the host[ip:{1}] unable\ to\ deliver\ the\ message;\ the\ destination\ service[%s]\ is\ dead;\ please\ use\ rabbitmqctl\ to\ check\ if\ the\ queue\ is\ existing\ and\ if\ any\ consumers\ on\ that\ queue = unable to deliver the message; the destination service[{0}] is dead; please use rabbitmqctl to check if the queue is existing and if any consumers on that queue @@ -863,17 +864,17 @@ api\ timeout\ cannot\ be\ set\ smaller\ than\ %s = api timeout cannot be set sma Invalid\ url[%s] = Invalid url[{0}] # In Module: crypto -the\ identity\ authentication\ does\ not\ specify\ the\ resource\ pool\ to\ provide\ the\ service = the identity authentication does not specify the resource pool to provide the service failed\ to\ find\ model\ for\ secretResourcePool\ [%s] = failed to find model for secretResourcePool [{0}] wrong\ secret\ resource\ pool\ model,\ expect\ %s,\ actual\ %s = wrong secret 
resource pool model, expect {0}, actual {1} -failed\ to\ connect\ remote\ crypto\ server = failed to connect remote crypto server parseCertificate\ failed\:\ %s = parseCertificate failed: {0} -additional\ authentication\ server\ raise\ an\ error = additional authentication server raise an error -additional\ authentication\ failed = additional authentication failed failed\ to\ parse\ certificate = failed to parse certificate account[uuid\=%s]\ and\ certificate\ are\ not\ matched = account[uuid={0}] and certificate are not matched no\ certificate\ attach\ to\ account[uuid\=%s] = no certificate attach to account[uuid={0}] wrong\ authenticationId\:\ %s = wrong authenticationId: {0} +the\ identity\ authentication\ does\ not\ specify\ the\ resource\ pool\ to\ provide\ the\ service = the identity authentication does not specify the resource pool to provide the service +failed\ to\ connect\ remote\ crypto\ server = failed to connect remote crypto server +additional\ authentication\ server\ raise\ an\ error = additional authentication server raise an error +additional\ authentication\ failed = additional authentication failed failed\ to\ generate\ credential = failed to generate credential failed\ to\ decrypt\ credential = failed to decrypt credential failed\ to\ parse\ plain\ text\ in\ encryption\ param\ to\ json\ object\:\ %s,\ %s = failed to parse plain text in encryption param to json object: {0}, {1} @@ -1989,7 +1990,6 @@ no\ available\ network\ interface\ on\ the\ host\ to\ start\ the\ vm = no availa vm\ security\ level\ not\ consistent\ with\ vms\ running\ on\ host = vm security level not consistent with vms running on host fail\ to\ update\ iscsi\ initiator\ name\ of\ host[uuid\:%s] = fail to update iscsi initiator name of host[uuid:{0}] networkInterface[name\:%s]\ of\ host[uuid\:%s]\ can\ not\ find = networkInterface[name:{0}] of host[uuid:{1}] can not find -host[uuid\:%s]\ can\ not\ find = host[uuid:{0}] can not find primary\ storage\ type\ doesn't\ support\ sync\ 
qos\ from\ host = primary storage type doesn''t support sync qos from host primary\ storage\ type\ doesn't\ support\ set\ qos = primary storage type doesn''t support set qos host[uuid\:%s]\ becomes\ power\ off,\ send\ notify = host[uuid:{0}] becomes power off, send notify @@ -2009,6 +2009,7 @@ Not\ found\ strategy[%s]\ that\ you\ request. = Not found strategy[{0}] that you Not\ enough\ resource\ on\ Host[%s]. = Not enough resource on Host[{0}]. failed\ to\ get\ interface\ vlanIds\ of\ host[uuid\:%s]\ \:\ %s = failed to get interface vlanIds of host[uuid:{0}] : {1} cluster[uuids\:%s,\ hypervisorType\:%s]\ are\ not\ exist! = cluster[uuids:{0}, hypervisorType:{1}] are not exist! +host[uuid\:%s]\ can\ not\ find = host[uuid:{0}] can not find ovs\ cpu\ pinning\ resource\ config\:[%s]\ format\ error. = ovs cpu pinning resource config:[{0}] format error. only\ %s\ support\ sr-iov = only {0} support sr-iov L3\ Network\ [uuid\:%s]\ doesn't\ exist = L3 Network [uuid:{0}] doesn''t exist @@ -2096,6 +2097,7 @@ vm[uuid\:%s]\ already\ attached\ to\ vm\ scheduling\ group[uuid\:%s] = vm[uuid:{ vm\ can\ change\ its\ vm\ scheduling\ group\ only\ in\ state\ [%s,%s],\ but\ vm\ is\ in\ state\ [%s] = vm can change its vm scheduling group only in state [{0},{1}], but vm is in state [{2}] cannot\ operate\ vpc\ vm\ scheduling\ group = cannot operate vpc vm scheduling group zoneUuid\ is\ not\ null = zoneUuid is not null +the\ vm\ scheduling\ group\ has\ already\ had\ a\ vms\ Affinitive\ to\ Hosts\ scheduling\ policy\ attached = the vm scheduling group has already had a vms Affinitive to Hosts scheduling policy attached, you cannot attach a vm antiaffinity from each other scheduling rule to the group again. 
can\ not\ satisfied\ vm\ scheduling\ rule\ group\ conditions = can not satisfied vm scheduling rule group conditions vm\ scheduling\ group[uuid\:%s]\ reserve\ host\ [uuid\:%s]\ for\ vm\ [uuid\:\ %s]\ failed = vm scheduling group[uuid:{0}] reserve host [uuid:{1}] for vm [uuid: {2}] failed hostGroup[uuid\:%s]\ is\ no\ host = hostGroup[uuid:{0}] is no host @@ -2148,7 +2150,8 @@ not\ allowed\ for\ current\ license,\ please\ apply\ addon\ license\ for\ produc failed\ to\ delete\ license = failed to delete license Insufficient\ VM\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ VMs\:\ %d. = Insufficient VM number licensed. Your {0} license permits the number of VMs: {1}. Insufficient\ Host\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ Hosts\:\ %d. = Insufficient Host number licensed. Your {0} license permits the number of Hosts: {1}. -Insufficient\ CPU\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ CPUs\:\ %d. = Insufficient CPU number licensed. Your {0} license permits the number of CPUs: {1}. +Insufficient\ CPU\ Socket\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ CPU\ Sockets\:\ %d. = Insufficient CPU Socket number licensed. Your {0} license permits the number of CPU Sockets: {1}. +Insufficient\ CPU\ Core\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ CPU\ Cores\:\ %d. = Insufficient CPU Core number licensed. Your {0} license permits the number of CPU Cores: {1}. Insufficient\ Capacity\ licensed.\ Your\ %s\ license\ permits\ Capacity\:\ %sTB\ . = Insufficient Capacity licensed. Your {0} license permits Capacity: {1}TB . failed\ to\ delete\ license\ by\ module = failed to delete license by module No\ local\ ukey\ license\ updated = No local ukey license updated @@ -2168,6 +2171,7 @@ Unexpected\ thumbprint = Unexpected thumbprint Platform\ license\ expired. = Platform license expired. Found\ Xinchuang\ host,\ but\ the\ type\ of\ license\ does\ not\ match. 
= Found Xinchuang host, but the type of license does not match. hijacked\ detected.\ Your\ license[%s]\ permits\ %d\ CPU\ sockets,\ but\ consumed\ %d.\ You\ can\ either\ apply\ a\ new\ license\ or\ delete\ additional\ hosts = hijacked detected. Your license[{0}] permits {1} CPU sockets, but consumed {2}. You can either apply a new license or delete additional hosts +hijacked\ detected.\ Your\ license[%s]\ permits\ %d\ CPU\ cores,\ but\ consumed\ %d.\ You\ can\ either\ apply\ a\ new\ license\ or\ delete\ additional\ hosts = hijacked detected. Your license[{0}] permits {1} CPU cores, but consumed {2}. You can either apply a new license or delete additional hosts hijacked\ detected.\ Your\ license[%s]\ permits\ %s\ hosts,\ but\ consumed\ %s,\ You\ can\ either\ apply\ a\ new\ license\ or\ delete\ additional\ hosts = hijacked detected. Your license[{0}] permits {1} hosts, but consumed {2}, You can either apply a new license or delete additional hosts failed\ to\ read\ edge\ license[%s]\:\ %s = failed to read edge license[{0}]: {1} for\ shareable\ volume,\ the\ only\ supported\ primary\ storage\ type\ is\ %s,\ current\ is\ %s = for shareable volume, the only supported primary storage type is {0}, current is {1} @@ -3323,7 +3327,6 @@ snapshot(s)\ %s\ in\ the\ group\ has\ been\ deleted,\ can\ only\ revert\ one\ by current\ volume\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s] = current volume state[{0}] doesn''t allow to proceed message[{1}] failed\ to\ select\ backup\ storage\ to\ download\ iso[uuid\=%s] = failed to select backup storage to download iso[uuid={0}] unable\ to\ download\ iso\ to\ primary\ storage = unable to download iso to primary storage -primary\ storage\ uuid\ conflict,\ the\ primary\ storage\ specified\ by\ the\ disk\ offering\ are\ %s,\ and\ the\ primary\ storage\ specified\ in\ the\ creation\ parameter\ is\ %s = primary storage uuid conflict, the primary storage specified by the disk offering are {0}, and the primary storage specified in the 
creation parameter is {1} volume[uuid\:%s]\ is\ not\ in\ status\ Ready,\ current\ is\ %s,\ can't\ create\ snapshot = volume[uuid:{0}] is not in status Ready, current is {1}, can''t create snapshot volume[uuid\:%s,\ type\:%s],\ can't\ create\ snapshot = volume[uuid:{0}, type:{1}], can''t create snapshot Can\ not\ take\ memory\ snapshot,\ vm\ current\ state[%s],\ but\ expect\ state\ are\ [%s,\ %s] = Can not take memory snapshot, vm current state[{0}], but expect state are [{1}, {2}] @@ -3344,6 +3347,7 @@ the\ volume[uuid\:%s]\ is\ in\ status\ of\ deleted,\ cannot\ do\ the\ operation data\ volume[uuid\:%s]\ has\ been\ attached\ to\ some\ vm,\ can't\ attach\ again = data volume[uuid:{0}] has been attached to some vm, can''t attach again data\ volume\ can\ only\ be\ attached\ when\ status\ is\ [%s,\ %s],\ current\ is\ %s = data volume can only be attached when status is [{0}, {1}], current is {2} data\ volume[uuid\:%s]\ of\ format[%s]\ is\ not\ supported\ for\ attach\ to\ any\ hypervisor. = data volume[uuid:{0}] of format[{1}] is not supported for attach to any hypervisor. 
+Can\ not\ attach\ volume\ to\ vm\ runs\ on\ host[uuid\:\ %s]\ which\ is\ disconnected\ with\ volume's\ storage[uuid\:\ %s] = Can not attach volume to vm runs on host[uuid: {0}] which is disconnected with volume''s storage[uuid: {1}] it's\ not\ allowed\ to\ backup\ root\ volume,\ uuid\:%s = it''s not allowed to backup root volume, uuid:{0} unexpected\ disk\ size\ settings = unexpected disk size settings volume[uuid\:%s,\ type\:%s]\ can't\ be\ deleted = volume[uuid:{0}, type:{1}] can''t be deleted @@ -3358,6 +3362,7 @@ cannot\ flatten\ a\ shareable\ volume[uuid\:%s] = cannot flatten a shareable vol can\ not\ found\ in\ used\ snapshot\ tree\ of\ volume[uuid\:\ %s] = can not found in used snapshot tree of volume[uuid: {0}] cannot\ undo\ not\ latest\ snapshot = cannot undo not latest snapshot duplicate\ volume\ uuids\:\ %s = duplicate volume uuids: {0} +invalid\ disk\ states = invalid disk states cannot\ find\ image\ cache[imageUuid\:\ %s]\ for\ reinit\ volume = cannot find image cache[imageUuid: {0}] for reinit volume the\ volume[uuid\:%s,\ name\:%s]\ is\ not\ deleted\ yet,\ can't\ expunge\ it = the volume[uuid:{0}, name:{1}] is not deleted yet, can''t expunge it volume[uuid%s]\ should\ be\ attached. = volume[uuid{0}] should be attached. @@ -3368,6 +3373,7 @@ get\ primaryStorage\ %s\ type\ failed = get primaryStorage {0} type failed primaryStorage\ type\ [%s]\ not\ support\ shared\ volume\ yet = primaryStorage type [{0}] not support shared volume yet the\ image[uuid\:%s,\ name\:%s]\ has\ been\ deleted\ on\ all\ backup\ storage = the image[uuid:{0}, name:{1}] has been deleted on all backup storage cannot\ find\ a\ backup\ storage\ on\ which\ the\ image[uuid\:%s]\ is\ that\ satisfies\ all\ conditions\ of\:\ 1.\ has\ state\ Enabled\ 2.\ has\ status\ Connected.\ 3\ has\ attached\ to\ zone\ in\ which\ primary\ storage[uuid\:%s]\ is = cannot find a backup storage on which the image[uuid:{0}] is that satisfies all conditions of: 1. has state Enabled 2. has status Connected. 
3 has attached to zone in which primary storage[uuid:{1}] is +primary\ storage\ uuid\ conflict,\ the\ primary\ storage\ specified\ by\ the\ disk\ offering\ are\ %s,\ and\ the\ primary\ storage\ specified\ in\ the\ creation\ parameter\ is\ %s = primary storage uuid conflict, the primary storage specified by the disk offering are {0}, and the primary storage specified in the creation parameter is {1} target\ volume\ is\ expunged\ during\ volume\ creation = target volume is expunged during volume creation there\ should\ not\ be\ more\ than\ one\ %s\ implementation. = there should not be more than one {0} implementation. @@ -3588,7 +3594,77 @@ failed\ to\ delete\ hostKernelInterface[uuid\:%s]\ on\ the\ host[uuid\:%s],\ %s failed\ to\ refresh\ host\ kernel\ interface\ on\ host[uuid\:%s],\ %s = failed to refresh host kernel interface on host[uuid:{0}], {1} failed\ to\ get\ the\ host\ interface\ for\ the\ managementIp[%s] = failed to get the host interface for the managementIp[{0}] -# In Module: woodpecker +# In Module: volumebackup +bandWidth\ must\ be\ a\ positive\ number = bandWidth must be a positive number +missing\ 'retentionType'\ in\ job\ parameters = missing ''retentionType'' in job parameters +missing\ 'retentionValue'\ in\ job\ parameters = missing ''retentionValue'' in job parameters +missing\ 'backupStorageUuids'\ in\ job\ parameters = missing ''backupStorageUuids'' in job parameters +job\ parameter\ 'backupStorageUuids'\ is\ empty = job parameter ''backupStorageUuids'' is empty +unexpected\ backup\ storage\ uuid\:\ %s = unexpected backup storage uuid: {0} +missing\ 'remoteRetentionType'\ in\ job\ parameters = missing ''remoteRetentionType'' in job parameters +missing\ 'remoteRetentionValue'\ in\ job\ parameters = missing ''remoteRetentionValue'' in job parameters +missing\ job\ parameters = missing job parameters +No\ available\ backup\ storage\ found,\ skip\ this\ job = No available backup storage found, skip this job +database\ backup[uuid%s]\ has\ not\ 
been\ exported\ from\ backupStorage[uuid\:%s] = database backup[uuid{0}] has not been exported from backupStorage[uuid:{1}] +database\ backup[uuid%s]\ has\ been\ exported\ from\ backupStorage[uuid\:%s] = database backup[uuid{0}] has been exported from backupStorage[uuid:{1}] +do\ not\ allow\ cover\ database\ from\ backup = do not allow cover database from backup +installPath\ and\ bsUrl\ are\ both\ need = installPath and bsUrl are both need +databaseBackup[uuid\:%s]\ is\ not\ Enabled\ and\ Ready = databaseBackup[uuid:{0}] is not Enabled and Ready +illegal\ url[%s],\ correct\ example\ is\ ssh\://username\:password@hostname[\:sshPort]/path = illegal url[{0}], correct example is ssh://username:password@hostname[:sshPort]/path +database\ backup[uuid\:%s]\ is\ not\ Enabled\ and\ Ready = database backup[uuid:{0}] is not Enabled and Ready +One\ of\ the\ backup\ storage[uuids\:\ %s,\ %s]\ is\ in\ the\ state\ of\ %s,\ can\ not\ do\ sync\ operation = One of the backup storage[uuids: {0}, {1}] is in the state of {2}, can not do sync operation +database\ backup[uuid\:%s]\ not\ found\ in\ backup\ storage[uuid\:%s] = database backup[uuid:{0}] not found in backup storage[uuid:{1}] +sync\ task\ failed. = sync task failed. +unexpected\ task\ status\:\ %s = unexpected task status: {0} +database\ backup\ [uuid\:%s]\ is\ not\ existed\ yet = database backup [uuid:{0}] is not existed yet +backup\ storage[uuid\:%s]\ is\ not\ enabled\ and\ connected = backup storage[uuid:{0}] is not enabled and connected +not\ pass\ the\ restore\ security\ check\:\\n%s = not pass the restore security check:\\n{0} +cannot\ get\ free\ port\ to\ listen = cannot get free port to listen +database\ backup\ version[%s]\ is\ not\ match\ currently\ version[%s] = database backup version[{0}] is not match currently version[{1}] +cannot\ ssh\ peer\ node\ via\ sshkey,\ please\ check\ connection = cannot ssh peer node via sshkey, please check connection +please\ stop\ other\ node\ first! = please stop other node first! 
+current\ backup\ storage\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s],\ allowed\ states\ are\ %s = current backup storage state[{0}] doesn''t allow to proceed message[{1}], allowed states are {2} +Unexpected\ backup\ storage[type\:%s,uuid\:%s] = Unexpected backup storage[type:{0},uuid:{1}] +Can\ not\ create\ volume\ backup\ for\ shareable\ volume[uuid\:%s] = Can not create volume backup for shareable volume[uuid:{0}] +Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ it\ is\ not\ attached\ to\ any\ vm = Failed to create volume backup for volume[uuid:{0}], because it is not attached to any vm +Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ the\ vm\ is\ not\ in\ state[%s,\ %s] = Failed to create volume backup for volume[uuid:{0}], because the vm is not in state[{1}, {2}] +Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ its\ attached\ volume\ is\ not\ in\ state[%s,\ %s] = Failed to create volume backup for volume[uuid:{0}], because its attached volume is not in state[{1}, {2}] +Volume[uuid\:%s]\ is\ not\ root\ volume = Volume[uuid:{0}] is not root volume +Failed\ to\ create\ backups\ for\ VM[uuid\:%s],\ because\ it\ is\ not\ in\ state[%s,\ %s] = Failed to create backups for VM[uuid:{0}], because it is not in state[{1}, {2}] +The\ resource[uuid\:\ %s]\ has\ already\ created\ a\ cdp\ task,\ cannot\ create\ the\ backup\ job\ at\ the\ same\ time. = The resource[uuid: {0}] has already created a cdp task, cannot create the backup job at the same time. 
+No\ volume\ backup\ found\ for\ group\ uuid\:\ %s = No volume backup found for group uuid: {0} +root\ volume\ backup\ of\ group[uuid\:%s]\ not\ found = root volume backup of group[uuid:{0}] not found +Current\ vm[uuid\:\ %s]\ of\ the\ volume[uuid\:\ %s]\ is\ no\ longer\ the\ vm[uuid\:\ %s]\ that\ was\ used\ for\ backup = Current vm[uuid: {0}] of the volume[uuid: {1}] is no longer the vm[uuid: {2}] that was used for backup +instanceOfferingUuid\ or\ cpuNum\ and\ memorySize\ must\ be\ set = instanceOfferingUuid or cpuNum and memorySize must be set +cannot\ specify\ primary\ storage\ which\ attached\ different\ cluster. = cannot specify primary storage which attached different cluster. +cannot\ create\ vm\ from\ volume\ backup[uuid\:%s]\ which\ is\ not\ root\ volume\ backup = cannot create vm from volume backup[uuid:{0}] which is not root volume backup +volume\ backup[uuid\:%s]\ is\ in\ state\ %s,\ cannot\ revert\ volume\ to\ it = volume backup[uuid:{0}] is in state {1}, cannot revert volume to it +original\ volume\ for\ backup[uuid\:%s]\ has\ been\ deleted,\ cannot\ revert\ volume\ to\ it = original volume for backup[uuid:{0}] has been deleted, cannot revert volume to it +original\ volume[uuid\:%s]\ for\ backup[uuid\:%s]\ is\ no\ longer\ attached\ to\ vm[uuid\:%s] = original volume[uuid:{0}] for backup[uuid:{1}] is no longer attached to vm[uuid:{2}] +VM\ not\ found\ with\ volume\ backup[uuid\:%s] = VM not found with volume backup[uuid:{0}] +VM\ is\ not\ in\ stopped\ state\:\ %s = VM is not in stopped state: {0} +No\ available\ backup\ storage\ found = No available backup storage found +cannot\ find\ volume\ backup[uuid\:%s] = cannot find volume backup[uuid:{0}] +the\ cluster\ of\ vm[%s]\ is\ not\ in\ the\ same\ cluster\ as\ the\ primaryStorage[%s] = the cluster of vm[{0}] is not in the same cluster as the primaryStorage[{1}] +Operation\ not\ supported\ on\ shared\ volume = Operation not supported on shared volume +No\ VM\ found\ for\ volume[uuid\:%s] = No VM found 
for volume[uuid:{0}] +No\ VM\ found\ with\ root\ volume\ uuid\:\ %s = No VM found with root volume uuid: {0} +failed\ to\ create\ image\ from\ backup\ %s = failed to create image from backup {0} +sync\ volume\ backup\ metadata\ file\ in\ image\ store[uuid\:%s]\ meet\ I/O\ error\:\ %s = sync volume backup metadata file in image store[uuid:{0}] meet I/O error: {1} +Current\ vm[uuid\:\ %s]\ of\ the\ backup\ volume\ is\ no\ longer\ the\ vm[uuid\:\ %s]\ that\ was\ used\ for\ backup = Current vm[uuid: {0}] of the backup volume is no longer the vm[uuid: {1}] that was used for backup +No\ VolumeBackupFactory\ of\ type[%s]\ found = No VolumeBackupFactory of type[{0}] found +One\ of\ the\ backup\ storage[uuid\:\ %s]\ is\ in\ the\ state\ of\ %s,\ can\ not\ do\ sync\ operation = One of the backup storage[uuid: {0}] is in the state of {1}, can not do sync operation +Volume\ backup[uuid\:%s]\ not\ found\ on\ backup\ storage[uuid\:%s] = Volume backup[uuid:{0}] not found on backup storage[uuid:{1}] +volume\ backup[uuid\:%s]\ not\ found\ in\ backup\ storage[uuid\:%s] = volume backup[uuid:{0}] not found in backup storage[uuid:{1}] +No\ volume\ backups\ found\ with\ group\ uuid\:\ %s = No volume backups found with group uuid: {0} +Root\ volume\ missing\ within\ group\ uuid\:\ %s = Root volume missing within group uuid: {0} +Multiple\ root\ volumes\ found\ within\ group\ uuid\:\ %s = Multiple root volumes found within group uuid: {0} +No\ permission\ to\ volume\ backups\ within\ group\ uuid\:\ %s = No permission to volume backups within group uuid: {0} +Volume\ backup[uuid\:%s]\ not\ found\ on\ any\ backup\ storage = Volume backup[uuid:{0}] not found on any backup storage +degree\ [%s]\ should\ be\ a\ positive\ number = degree [{0}] should be a positive number +invalid\ type[%s],\ should\ be\ [nfs,\ sshfs,\ nbd] = invalid type[{0}], should be [nfs, sshfs, nbd] +invalid\ url[%s],\ should\ be\ hostname\:/path = invalid url[{0}], should be hostname:/path +volume\ backup\ metadata\ 
operation\ failure,\ because\ %s = volume backup metadata operation failure, because {0} # In Module: vpc Network\ [uuid\:\ %s]\ does't\ not\ have\ IPsec\ service = Network [uuid: {0}] does''t not have IPsec service @@ -3753,6 +3829,8 @@ ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ ip\ exist\ in\ local\ vtep = ip[{0}] %s\:is\ not\ ipv4 = {0}:is not ipv4 vxlan\ vtep\ address\ for\ host\ [uuid\ \:\ %s]\ and\ pool\ [uuid\ \:\ %s]\ pair\ already\ existed = vxlan vtep address for host [uuid : {0}] and pool [uuid : {1}] pair already existed +# In Module: woodpecker + # In Module: xdragon xdragon\ host\ not\ support\ create\ vm\ using\ an\ iso\ image. = xdragon host not support create vm using an iso image. @@ -3907,75 +3985,3 @@ GreaterThan = GreaterThan LessThan = LessThan LessThanOrEqualTo = LessThanOrEqualTo resource[%s]\ doesn't\ support\ zwatch\ return\ with\ clause = resource[{0}] doesn''t support zwatch return with clause - -# In Module: volumebackup -bandWidth\ must\ be\ a\ positive\ number = bandWidth must be a positive number -missing\ 'retentionType'\ in\ job\ parameters = missing ''retentionType'' in job parameters -missing\ 'retentionValue'\ in\ job\ parameters = missing ''retentionValue'' in job parameters -missing\ 'backupStorageUuids'\ in\ job\ parameters = missing ''backupStorageUuids'' in job parameters -job\ parameter\ 'backupStorageUuids'\ is\ empty = job parameter ''backupStorageUuids'' is empty -unexpected\ backup\ storage\ uuid\:\ %s = unexpected backup storage uuid: {0} -missing\ 'remoteRetentionType'\ in\ job\ parameters = missing ''remoteRetentionType'' in job parameters -missing\ 'remoteRetentionValue'\ in\ job\ parameters = missing ''remoteRetentionValue'' in job parameters -missing\ job\ parameters = missing job parameters -No\ available\ backup\ storage\ found,\ skip\ this\ job = No available backup storage found, skip this job -database\ backup[uuid%s]\ has\ not\ been\ exported\ from\ backupStorage[uuid\:%s] = database backup[uuid{0}] has 
not been exported from backupStorage[uuid:{1}] -database\ backup[uuid%s]\ has\ been\ exported\ from\ backupStorage[uuid\:%s] = database backup[uuid{0}] has been exported from backupStorage[uuid:{1}] -do\ not\ allow\ cover\ database\ from\ backup = do not allow cover database from backup -installPath\ and\ bsUrl\ are\ both\ need = installPath and bsUrl are both need -databaseBackup[uuid\:%s]\ is\ not\ Enabled\ and\ Ready = databaseBackup[uuid:{0}] is not Enabled and Ready -illegal\ url[%s],\ correct\ example\ is\ ssh\://username\:password@hostname[\:sshPort]/path = illegal url[{0}], correct example is ssh://username:password@hostname[:sshPort]/path -database\ backup[uuid\:%s]\ is\ not\ Enabled\ and\ Ready = database backup[uuid:{0}] is not Enabled and Ready -One\ of\ the\ backup\ storage[uuids\:\ %s,\ %s]\ is\ in\ the\ state\ of\ %s,\ can\ not\ do\ sync\ operation = One of the backup storage[uuids: {0}, {1}] is in the state of {2}, can not do sync operation -database\ backup[uuid\:%s]\ not\ found\ in\ backup\ storage[uuid\:%s] = database backup[uuid:{0}] not found in backup storage[uuid:{1}] -sync\ task\ failed. = sync task failed. -unexpected\ task\ status\:\ %s = unexpected task status: {0} -database\ backup\ [uuid\:%s]\ is\ not\ existed\ yet = database backup [uuid:{0}] is not existed yet -backup\ storage[uuid\:%s]\ is\ not\ enabled\ and\ connected = backup storage[uuid:{0}] is not enabled and connected -not\ pass\ the\ restore\ security\ check\:\\n%s = not pass the restore security check:\\n{0} -cannot\ get\ free\ port\ to\ listen = cannot get free port to listen -database\ backup\ version[%s]\ is\ not\ match\ currently\ version[%s] = database backup version[{0}] is not match currently version[{1}] -cannot\ ssh\ peer\ node\ via\ sshkey,\ please\ check\ connection = cannot ssh peer node via sshkey, please check connection -please\ stop\ other\ node\ first! = please stop other node first! 
-current\ backup\ storage\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s],\ allowed\ states\ are\ %s = current backup storage state[{0}] doesn''t allow to proceed message[{1}], allowed states are {2} -Unexpected\ backup\ storage[type\:%s,uuid\:%s] = Unexpected backup storage[type:{0},uuid:{1}] -Can\ not\ create\ volume\ backup\ for\ shareable\ volume[uuid\:%s] = Can not create volume backup for shareable volume[uuid:{0}] -Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ it\ is\ not\ attached\ to\ any\ vm = Failed to create volume backup for volume[uuid:{0}], because it is not attached to any vm -Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ the\ vm\ is\ not\ in\ state[%s,\ %s] = Failed to create volume backup for volume[uuid:{0}], because the vm is not in state[{1}, {2}] -Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ its\ attached\ volume\ is\ not\ in\ state[%s,\ %s] = Failed to create volume backup for volume[uuid:{0}], because its attached volume is not in state[{1}, {2}] -Volume[uuid\:%s]\ is\ not\ root\ volume = Volume[uuid:{0}] is not root volume -Failed\ to\ create\ backups\ for\ VM[uuid\:%s],\ because\ it\ is\ not\ in\ state[%s,\ %s] = Failed to create backups for VM[uuid:{0}], because it is not in state[{1}, {2}] -The\ resource[uuid\:\ %s]\ has\ already\ created\ a\ cdp\ task,\ cannot\ create\ the\ backup\ job\ at\ the\ same\ time. = The resource[uuid: {0}] has already created a cdp task, cannot create the backup job at the same time. 
-No\ volume\ backup\ found\ for\ group\ uuid\:\ %s = No volume backup found for group uuid: {0} -root\ volume\ backup\ of\ group[uuid\:%s]\ not\ found = root volume backup of group[uuid:{0}] not found -Current\ vm[uuid\:\ %s]\ of\ the\ volume[uuid\:\ %s]\ is\ no\ longer\ the\ vm[uuid\:\ %s]\ that\ was\ used\ for\ backup = Current vm[uuid: {0}] of the volume[uuid: {1}] is no longer the vm[uuid: {2}] that was used for backup -instanceOfferingUuid\ or\ cpuNum\ and\ memorySize\ must\ be\ set = instanceOfferingUuid or cpuNum and memorySize must be set -cannot\ specify\ primary\ storage\ which\ attached\ different\ cluster. = cannot specify primary storage which attached different cluster. -cannot\ create\ vm\ from\ volume\ backup[uuid\:%s]\ which\ is\ not\ root\ volume\ backup = cannot create vm from volume backup[uuid:{0}] which is not root volume backup -volume\ backup[uuid\:%s]\ is\ in\ state\ %s,\ cannot\ revert\ volume\ to\ it = volume backup[uuid:{0}] is in state {1}, cannot revert volume to it -original\ volume\ for\ backup[uuid\:%s]\ has\ been\ deleted,\ cannot\ revert\ volume\ to\ it = original volume for backup[uuid:{0}] has been deleted, cannot revert volume to it -original\ volume[uuid\:%s]\ for\ backup[uuid\:%s]\ is\ no\ longer\ attached\ to\ vm[uuid\:%s] = original volume[uuid:{0}] for backup[uuid:{1}] is no longer attached to vm[uuid:{2}] -VM\ not\ found\ with\ volume\ backup[uuid\:%s] = VM not found with volume backup[uuid:{0}] -VM\ is\ not\ in\ stopped\ state\:\ %s = VM is not in stopped state: {0} -No\ available\ backup\ storage\ found = No available backup storage found -cannot\ find\ volume\ backup[uuid\:%s] = cannot find volume backup[uuid:{0}] -the\ cluster\ of\ vm[%s]\ is\ not\ in\ the\ same\ cluster\ as\ the\ primaryStorage[%s] = the cluster of vm[{0}] is not in the same cluster as the primaryStorage[{1}] -Operation\ not\ supported\ on\ shared\ volume = Operation not supported on shared volume -No\ VM\ found\ for\ volume[uuid\:%s] = No VM found 
for volume[uuid:{0}] -No\ VM\ found\ with\ root\ volume\ uuid\:\ %s = No VM found with root volume uuid: {0} -failed\ to\ create\ image\ from\ backup\ %s = failed to create image from backup {0} -sync\ volume\ backup\ metadata\ file\ in\ image\ store[uuid\:%s]\ meet\ I/O\ error\:\ %s = sync volume backup metadata file in image store[uuid:{0}] meet I/O error: {1} -Current\ vm[uuid\:\ %s]\ of\ the\ backup\ volume\ is\ no\ longer\ the\ vm[uuid\:\ %s]\ that\ was\ used\ for\ backup = Current vm[uuid: {0}] of the backup volume is no longer the vm[uuid: {1}] that was used for backup -No\ VolumeBackupFactory\ of\ type[%s]\ found = No VolumeBackupFactory of type[{0}] found -One\ of\ the\ backup\ storage[uuid\:\ %s]\ is\ in\ the\ state\ of\ %s,\ can\ not\ do\ sync\ operation = One of the backup storage[uuid: {0}] is in the state of {1}, can not do sync operation -Volume\ backup[uuid\:%s]\ not\ found\ on\ backup\ storage[uuid\:%s] = Volume backup[uuid:{0}] not found on backup storage[uuid:{1}] -volume\ backup[uuid\:%s]\ not\ found\ in\ backup\ storage[uuid\:%s] = volume backup[uuid:{0}] not found in backup storage[uuid:{1}] -No\ volume\ backups\ found\ with\ group\ uuid\:\ %s = No volume backups found with group uuid: {0} -Root\ volume\ missing\ within\ group\ uuid\:\ %s = Root volume missing within group uuid: {0} -Multiple\ root\ volumes\ found\ within\ group\ uuid\:\ %s = Multiple root volumes found within group uuid: {0} -No\ permission\ to\ volume\ backups\ within\ group\ uuid\:\ %s = No permission to volume backups within group uuid: {0} -Volume\ backup[uuid\:%s]\ not\ found\ on\ any\ backup\ storage = Volume backup[uuid:{0}] not found on any backup storage -degree\ [%s]\ should\ be\ a\ positive\ number = degree [{0}] should be a positive number -invalid\ type[%s],\ should\ be\ [nfs,\ sshfs,\ nbd] = invalid type[{0}], should be [nfs, sshfs, nbd] -invalid\ url[%s],\ should\ be\ hostname\:/path = invalid url[{0}], should be hostname:/path -volume\ backup\ metadata\ 
operation\ failure,\ because\ %s = volume backup metadata operation failure, because {0} diff --git a/conf/i18n/messages_zh_CN.properties b/conf/i18n/messages_zh_CN.properties index 613440356a9..2620368db78 100755 --- a/conf/i18n/messages_zh_CN.properties +++ b/conf/i18n/messages_zh_CN.properties @@ -347,12 +347,10 @@ Unexpected\ task\ type[uuid\:\ %s,\ type\:\ %s] = 意外的任务类型[uuid:{ Max\ capacity\ not\ found\ for\ cdp\ task[uuid\:\ %s],\ please\ update\ it. = Max\ latency\ not\ found\ for\ cdp\ task[uuid\:\ %s],\ please\ update\ it. = VM[uuid\:\ %s]\ already\ deleted = 虚拟机[uuid:{0}]已删除 -Unexpected\ VM\ state\:\ %s = 意外的VM状态:{0} Backup\ storage\ not\ found[uuid\:\ %s] = 未找到备份存储[uuid:{0}] Backup\ storage[uuid\:\ %s]\ is\ disabled = 备份存储[uuid:{0}]已禁用 Backup\ storage[uuid\:\ %s]\ is\ not\ connected = 备份存储[uuid:{0}]未连接 The\ vm[uuid\:\ %s]\ has\ already\ created\ a\ backup\ job,\ cannot\ enable\ the\ cdp\ task\ at\ the\ same\ time. = VM[uuid:{0}]已创建备份作业,无法同时启用CDP任务。 -unexpected\ task\ type\:\ %s = 意外的任务类型:{0} '%s'(%d)\ should\ be\ larger\ than\ '%s'(%d) = “{0}”({1})应大于“{2}”({3}) mandatory\ args\ missing\:\ %s = 缺少必需的参数:{0} parameter\ RetentionTimePerDay\ and\ DailyRpSinceDay\ cannot\ be\ equal = @@ -371,23 +369,24 @@ CDP\ task[uuid\:\ %s]\ has\ no\ VM\ attached = CDP任务[uuid:{0}]未连接VM task[uuid\:%s]\ have\ been\ deleted = 任务[uuid:{0}]已被删除 Cdp\ task\ is\ merging\ data,\ cannot\ pickup. = CDP\ task[uuid\:%s]\ exceeded\ storage\ usage\:\ maximum\ %d,\ used\ %d. = CDP任务[uuid:{0}]超出了存储使用率:最大值{1},已使用{2}。 +Unexpected\ VM\ state\:\ %s = 意外的VM状态:{0} No\ CDP\ task\ found\ for\ VM\:\ %s = 未找到VM{0}的CDP任务 No\ CDP\ backup\ storage\ found\ for\ VM\:\ %s = 未找到虚拟机{0}的CDP备份存储 No\ CdpBackupFactory\ of\ type[%s]\ found = 未找到类型为[{0}]的CDPBackupFactory CDP\ task[uuid\:\ %s]\ not\ found = 未找到CDP任务[uuid:{0}] +unexpected\ task\ type\:\ %s = 意外的任务类型:{0} The\ VM[%s]\ for\ volume[%s]\ is\ running\ CDP,\ cannot\ resize\ now. 
= 卷[{1}]的VM[{0}]正在运行CDP,现在无法调整大小。 No\ VM\ found\ for\ CDP\ task[uuid\:\ %s] = 未找到CDP任务[uuid:{0}]的VM BackupStorage[uuid\:\ %s]\ already\ been\ deleted = BackupStorage[uuid:{0}]已删除 -waiting\ host[uuid\:%s]\ and\ backupStorage[uuid\:%s]\ to\ be\ Connected... = 正在等待要连接的主机[uuid:{0}]和备份存储[uuid:{1}].. failed\ to\ find\ cdp\ task[uuid\:%s]\ = VM\ CDP\ task[uuid\:\ %s]\ not\ found = 找不到VM CDP任务[uuid:{0}] VM\ not\ found\ for\ CDP\ task[uuid\:\ %s] = 未找到CDP任务[uuid:{0}]的VM +no\ volume\ records\ found\ from\ VM\ backup = 未从虚拟机备份中找到卷记录 multiple\ root\ volumes\ found\ from\ CDP\ backup\ %s\:%d = 从CDP备份{0}中找到多个根卷:{1} cannot\ find\ root\ volume\ from\ CDP\ backup\ %s\:%d = 无法从CDP备份{0}中找到根卷:{1} root\ volume\ not\ found\ from\ CDP\ backup\ %s\:%d = 未从CDP备份{0}中找到根卷:{1} recoverVm\:\ host[uuid\:\ %s]\ not\ found\ for\ VM[uuid\:\ %s] = 未找到VM[uuid:{1}]的RecoverVM:物理机[uuid:{0}] multiple\ root\ volumes\ found\:\ %s = 找到多个根卷:{0} -no\ volume\ records\ found\ from\ VM\ backup = 未从虚拟机备份中找到卷记录 no\ root\ volume\ found\ from\ VM\ backup = 未从虚拟机备份中找到根卷 volume\ %s\ contains\ in\ backup\ but\ detached\ from\ VM[uuid\:\ %s]\:\ you\ need\ to\ either\ attach\ it\ back\ or\ delete\ it = 卷{0}包含在备份中,但已从VM分离[uuid:{1}]:您需要将其重新连接或将其删除 unexpected\ volume[uuid\:\ %s]\ size\:\ %d = 意外卷[uuid:{0}]大小:{1} @@ -397,6 +396,7 @@ Available\ License\ not\ found,\ please\ apply\ addon\ license\ for\ product\ CD kvmagent\ restarted = KVMAGENT重新启动 kvmagent\ no\ response\ %d\ times = KVMAgent无响应{0}次 recoverVm\:\ host\ uuid\ is\ not\ provided\ and\ original\ host\ is\ not\ found\ for\ VM[uuid\:\ %s] = RecoverVM:未提供物理机uuid,并且未找到VM[uuid:{0}]的原始物理机 +waiting\ host[uuid\:%s]\ and\ backupStorage[uuid\:%s]\ to\ be\ Connected... = 正在等待要连接的主机[uuid:{0}]和备份存储[uuid:{1}].. 
No\ CDP\ task\ found\ for\ VM[uuid\:\ %s] = 找不到VM[uuid:{0}]的CDP任务 CDP\ task\ for\ VM[uuid\:\ %s]\ is\ not\ found\ on\ BS[uuid\:\ %s] = 在BS[uuid:{1}]上找不到VM[uuid:{0}]的CDP任务 @@ -706,6 +706,7 @@ VmInstanceStartExtensionPoint[%s]\ refuses\ to\ start\ vm[uuid\:%s] = VmInstance Not\ allowed\ same\ mac\ [%s] = 不允许存在相同的MAC地址[{0}] Can't\ add\ same\ uuid\ in\ the\ l3Network,uuid\:\ %s = 不能添加相同uuid[{0}]的分布式端口组 host[uuid\:%s]\ is\ specified\ but\ it\ is\ not\ in\ cluster[uuid\:%s],\ can\ not\ create\ vm\ from\ it = 指定了主机[uuid:{0}],但它不在集群[uuid:{1}]中,无法从其创建虚拟机 +host[uuid\:%s]\ is\ specified\ but\ it\ is\ not\ in\ zone[uuid\:%s],\ can\ not\ create\ vm\ from\ it = 指定了主机[uuid:{0}],但它不在区域[uuid:{1}]中,无法从其创建虚拟机 host[uuid\:%s]\ is\ specified\ but\ it's\ Disabled,\ can\ not\ create\ vm\ from\ it = 主机[uuid:{0}]虽然被指定了但是处于未启用状态,不能从这上面创建虚拟机 host[uuid\:%s]\ is\ specified\ but\ its\ connection\ status\ is\ %s,\ can\ not\ create\ vm\ from\ it = cluster[uuid\:%s]\ is\ specified\ but\ it's\ not\ in\ zone[uuid\:%s],\ can\ not\ create\ vm\ from\ it = @@ -817,10 +818,10 @@ failed\ to\ reconnect\ console\ proxy = 重连控制台代理失败 # In Module: core no\ executor\ found\ for\ resourceUuid[%s] = Multiple\ errors = 多重错误触发,详情请查看 causes -fail\ to\ create\ new\ File[%s] = 无法创建新文件[{0}] failed\ to\ run\ ansible\:\ failed\ to\ find\ target\ host = failed\ to\ run\ ansible = User\ name\ or\ password\ or\ port\ number\ may\ be\ problematic = 用户名、密码或者端口可能是错误的 +fail\ to\ create\ new\ File[%s] = 无法创建新文件[{0}] cannot\ check\ md5sum\ of\ files\ in\ the\ folder[%s] = cannot\ check\ md5sum\ of\ files\ in\ the\ folder[%s]\ on\ the\ host[ip\:%s] = unable\ to\ deliver\ the\ message;\ the\ destination\ service[%s]\ is\ dead;\ please\ use\ rabbitmqctl\ to\ check\ if\ the\ queue\ is\ existing\ and\ if\ any\ consumers\ on\ that\ queue = @@ -863,17 +864,17 @@ api\ timeout\ cannot\ be\ set\ smaller\ than\ %s = API超时不能设置为小 Invalid\ url[%s] = 无效的URL[{0}] # In Module: crypto -the\ identity\ authentication\ does\ not\ specify\ the\ resource\ pool\ to\ provide\ the\ service = 
身份认证未指定提供服务的资源池 failed\ to\ find\ model\ for\ secretResourcePool\ [%s] = 找不到SecretResourcePool[{0}]的原型 wrong\ secret\ resource\ pool\ model,\ expect\ %s,\ actual\ %s = 机密资源池模型错误,应为{0},实际为{1} -failed\ to\ connect\ remote\ crypto\ server = 连接到远程密钥(密评)服务器失败 parseCertificate\ failed\:\ %s = -additional\ authentication\ server\ raise\ an\ error = -additional\ authentication\ failed = failed\ to\ parse\ certificate = 解析证书失败 account[uuid\=%s]\ and\ certificate\ are\ not\ matched = no\ certificate\ attach\ to\ account[uuid\=%s] = wrong\ authenticationId\:\ %s = +the\ identity\ authentication\ does\ not\ specify\ the\ resource\ pool\ to\ provide\ the\ service = 身份认证未指定提供服务的资源池 +failed\ to\ connect\ remote\ crypto\ server = 连接到远程密钥(密评)服务器失败 +additional\ authentication\ server\ raise\ an\ error = +additional\ authentication\ failed = failed\ to\ generate\ credential = failed\ to\ decrypt\ credential = failed\ to\ parse\ plain\ text\ in\ encryption\ param\ to\ json\ object\:\ %s,\ %s = 无法将加密参数中的纯文本解析为JSON对象:{0},{1} @@ -1989,7 +1990,6 @@ no\ available\ network\ interface\ on\ the\ host\ to\ start\ the\ vm = 主机上 vm\ security\ level\ not\ consistent\ with\ vms\ running\ on\ host = 虚拟机安全级别与主机上运行的虚拟机不一致 fail\ to\ update\ iscsi\ initiator\ name\ of\ host[uuid\:%s] = networkInterface[name\:%s]\ of\ host[uuid\:%s]\ can\ not\ find = 找不到主机[uuid:{1}]的网络接口[名称:{0}] -host[uuid\:%s]\ can\ not\ find = 找不到主机[uuid:{0}] primary\ storage\ type\ doesn't\ support\ sync\ qos\ from\ host = primary\ storage\ type\ doesn't\ support\ set\ qos = host[uuid\:%s]\ becomes\ power\ off,\ send\ notify = @@ -2009,6 +2009,7 @@ Not\ found\ strategy[%s]\ that\ you\ request. = Not\ enough\ resource\ on\ Host[%s]. = failed\ to\ get\ interface\ vlanIds\ of\ host[uuid\:%s]\ \:\ %s = cluster[uuids\:%s,\ hypervisorType\:%s]\ are\ not\ exist! = 集群[uuid:{0},HypervisorType:{1}]不存在! +host[uuid\:%s]\ can\ not\ find = 找不到主机[uuid:{0}] ovs\ cpu\ pinning\ resource\ config\:[%s]\ format\ error. 
= OVS CPU固定资源配置:[{0}]格式错误。 only\ %s\ support\ sr-iov = 仅{0}支持SR-IOV L3\ Network\ [uuid\:%s]\ doesn't\ exist = 三层网络[uuid:{0}]不存在 @@ -2096,6 +2097,7 @@ vm[uuid\:%s]\ already\ attached\ to\ vm\ scheduling\ group[uuid\:%s] = 虚拟机 vm\ can\ change\ its\ vm\ scheduling\ group\ only\ in\ state\ [%s,%s],\ but\ vm\ is\ in\ state\ [%s] = VM只能在状态[{0},{1}]下更改其VM调度组,但VM处于状态[{2}] cannot\ operate\ vpc\ vm\ scheduling\ group = 无法运行VPC虚拟机调度组 zoneUuid\ is\ not\ null = zoneUuid不为空 +the\ vm\ scheduling\ group\ has\ already\ had\ a\ vms\ Affinitive\ to\ Hosts\ scheduling\ policy\ attached = 该虚拟机调度组已绑定聚集虚拟机调度策略,不可绑定其它虚拟机调度策略 can\ not\ satisfied\ vm\ scheduling\ rule\ group\ conditions = 无法满足VM调度规则组条件 vm\ scheduling\ group[uuid\:%s]\ reserve\ host\ [uuid\:%s]\ for\ vm\ [uuid\:\ %s]\ failed = 虚拟机调度组[uuid:{0}]为虚拟机[uuid:{2}]保留主机[UuId:{1}]失败 hostGroup[uuid\:%s]\ is\ no\ host = 主机组[uuid:{0}]不是主机 @@ -2148,7 +2150,8 @@ not\ allowed\ for\ current\ license,\ please\ apply\ addon\ license\ for\ produc failed\ to\ delete\ license = 删除许可证失败 Insufficient\ VM\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ VMs\:\ %d. = 许可证的虚拟机配额数量不足。您的 {0} 许可证允许的虚拟机数量为 {1} Insufficient\ Host\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ Hosts\:\ %d. = 许可证的主机配额数量不足。您的 {0} 许可证允许的主机数量为 {1} -Insufficient\ CPU\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ CPUs\:\ %d. = 许可证的 CPU 配额数量不足。您的 {0} 许可证允许的 CPU 数量为 {1} +Insufficient\ CPU\ Socket\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ CPU\ Sockets\:\ %d. = 许可证的 CPU Socket 数配额数量不足。您的 {0} 许可证允许的 CPU Socket 数量为 {1} +Insufficient\ CPU\ Core\ number\ licensed.\ Your\ %s\ license\ permits\ the\ number\ of\ CPU\ Cores\:\ %d. = 许可证的 CPU 核心数配额数量不足。您的 {0} 许可证允许的 CPU 核心数量为 {1} Insufficient\ Capacity\ licensed.\ Your\ %s\ license\ permits\ Capacity\:\ %sTB\ . 
= 许可证的存储配额数量不足。您的 {0} 许可证允许的存储为 {1} TB failed\ to\ delete\ license\ by\ module = 删除指定模块的许可证失败 No\ local\ ukey\ license\ updated = 没有 UKey 许可证更新 @@ -2168,6 +2171,7 @@ Unexpected\ thumbprint = 未期望的错误 Platform\ license\ expired. = 平台许可证已过期 Found\ Xinchuang\ host,\ but\ the\ type\ of\ license\ does\ not\ match. = 当前许可证不支持信创类型的主机 hijacked\ detected.\ Your\ license[%s]\ permits\ %d\ CPU\ sockets,\ but\ consumed\ %d.\ You\ can\ either\ apply\ a\ new\ license\ or\ delete\ additional\ hosts = {0} 许可证配额为 {1},已使用 {2} 个 CPU 核数。请上传新的许可证或删除多余的主机 +hijacked\ detected.\ Your\ license[%s]\ permits\ %d\ CPU\ cores,\ but\ consumed\ %d.\ You\ can\ either\ apply\ a\ new\ license\ or\ delete\ additional\ hosts = hijacked\ detected.\ Your\ license[%s]\ permits\ %s\ hosts,\ but\ consumed\ %s,\ You\ can\ either\ apply\ a\ new\ license\ or\ delete\ additional\ hosts = {0} 许可证配额为 {1},已使用 {2} 个主机。请上传新的许可证或删除多余的主机 failed\ to\ read\ edge\ license[%s]\:\ %s = 读取 Edge 许可证 {0} 失败:{1} for\ shareable\ volume,\ the\ only\ supported\ primary\ storage\ type\ is\ %s,\ current\ is\ %s = 共享云盘仅支持在主存储类型为{0}的主存储上使用,当前的类型为{1} @@ -3323,7 +3327,6 @@ snapshot(s)\ %s\ in\ the\ group\ has\ been\ deleted,\ can\ only\ revert\ one\ by current\ volume\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s] = failed\ to\ select\ backup\ storage\ to\ download\ iso[uuid\=%s] = unable\ to\ download\ iso\ to\ primary\ storage = -primary\ storage\ uuid\ conflict,\ the\ primary\ storage\ specified\ by\ the\ disk\ offering\ are\ %s,\ and\ the\ primary\ storage\ specified\ in\ the\ creation\ parameter\ is\ %s = 主存储组冲突,硬盘规格指定的数据存储组是 {0},而创建参数指定的数据存储组是 {1} volume[uuid\:%s]\ is\ not\ in\ status\ Ready,\ current\ is\ %s,\ can't\ create\ snapshot = 硬盘[uuid:{0}]未出于就绪状态,当前是{1},不能创建快照 volume[uuid\:%s,\ type\:%s],\ can't\ create\ snapshot = 卷[uuid:{0},类型:{1}],无法创建快照 Can\ not\ take\ memory\ snapshot,\ vm\ current\ state[%s],\ but\ expect\ state\ are\ [%s,\ %s] = 无法获取内存快照,VM当前状态为[{0}],但预期状态为[{1},{2}] @@ -3344,6 +3347,7 @@ the\ 
volume[uuid\:%s]\ is\ in\ status\ of\ deleted,\ cannot\ do\ the\ operation data\ volume[uuid\:%s]\ has\ been\ attached\ to\ some\ vm,\ can't\ attach\ again = 硬盘[uuid:{0}]已经被加载上虚拟机了,不能再次加载 data\ volume\ can\ only\ be\ attached\ when\ status\ is\ [%s,\ %s],\ current\ is\ %s = 硬盘仅能当处于[{0}, {1}]状态挂载,当前状态是{2} data\ volume[uuid\:%s]\ of\ format[%s]\ is\ not\ supported\ for\ attach\ to\ any\ hypervisor. = +Can\ not\ attach\ volume\ to\ vm\ runs\ on\ host[uuid\:\ %s]\ which\ is\ disconnected\ with\ volume's\ storage[uuid\:\ %s] = 无法将云盘挂载到运行在主机[uuid: {0}]上的虚拟机,该主机与云盘所在存储[uuid: {1}]已断开连接 it's\ not\ allowed\ to\ backup\ root\ volume,\ uuid\:%s = 备份硬盘不被允许,uuid:{0} unexpected\ disk\ size\ settings = 意外的磁盘大小设置 volume[uuid\:%s,\ type\:%s]\ can't\ be\ deleted = 无法删除卷[uuid:{0},类型:{1}] @@ -3358,6 +3362,7 @@ cannot\ flatten\ a\ shareable\ volume[uuid\:%s] = 无法平整可共享的卷[uu can\ not\ found\ in\ used\ snapshot\ tree\ of\ volume[uuid\:\ %s] = 在卷[uuid:{0}]的已使用快照树中找不到 cannot\ undo\ not\ latest\ snapshot = 无法撤消不是最新的快照 duplicate\ volume\ uuids\:\ %s = +invalid\ disk\ states = 无效的磁盘状态 cannot\ find\ image\ cache[imageUuid\:\ %s]\ for\ reinit\ volume = 找不到重新初始化卷的镜像缓存[imageUuid:{0}] the\ volume[uuid\:%s,\ name\:%s]\ is\ not\ deleted\ yet,\ can't\ expunge\ it = 硬盘[uuid:{0}, name:{1}]仍未被删除,不能清除该硬盘 volume[uuid%s]\ should\ be\ attached. 
= 应附加卷[uuid{0}]。 @@ -3368,6 +3373,7 @@ get\ primaryStorage\ %s\ type\ failed = 获取PrimaryStorage{0}类型失败 primaryStorage\ type\ [%s]\ not\ support\ shared\ volume\ yet = 主存储类型[{0}]尚不支持共享硬盘 the\ image[uuid\:%s,\ name\:%s]\ has\ been\ deleted\ on\ all\ backup\ storage = 镜像[uuid:{0}, name:{1}]已经从所有的镜像服务器中删除 cannot\ find\ a\ backup\ storage\ on\ which\ the\ image[uuid\:%s]\ is\ that\ satisfies\ all\ conditions\ of\:\ 1.\ has\ state\ Enabled\ 2.\ has\ status\ Connected.\ 3\ has\ attached\ to\ zone\ in\ which\ primary\ storage[uuid\:%s]\ is = 无法找到一个镜像[uuid:{0}]所在的镜像服务器符合全部的下列条件: 状态启动[state:Enabled],已连接[status:Connected],被挂载到主存储[uuid:{1}]所在的数据中心中 +primary\ storage\ uuid\ conflict,\ the\ primary\ storage\ specified\ by\ the\ disk\ offering\ are\ %s,\ and\ the\ primary\ storage\ specified\ in\ the\ creation\ parameter\ is\ %s = 主存储组冲突,硬盘规格指定的数据存储组是 {0},而创建参数指定的数据存储组是 {1} target\ volume\ is\ expunged\ during\ volume\ creation = 目标卷在卷创建过程中被删除 there\ should\ not\ be\ more\ than\ one\ %s\ implementation. 
= 不允许超过一种实现 @@ -3588,7 +3594,77 @@ failed\ to\ delete\ hostKernelInterface[uuid\:%s]\ on\ the\ host[uuid\:%s],\ %s failed\ to\ refresh\ host\ kernel\ interface\ on\ host[uuid\:%s],\ %s = failed\ to\ get\ the\ host\ interface\ for\ the\ managementIp[%s] = -# In Module: woodpecker +# In Module: volumebackup +bandWidth\ must\ be\ a\ positive\ number = 带宽必须为正数 +missing\ 'retentionType'\ in\ job\ parameters = parameters中缺少retentionType参数 +missing\ 'retentionValue'\ in\ job\ parameters = parameter中缺少retentionValue参数 +missing\ 'backupStorageUuids'\ in\ job\ parameters = parameter中缺少backupStorageUuids参数 +job\ parameter\ 'backupStorageUuids'\ is\ empty = parameter中backupStorageUuids为空 +unexpected\ backup\ storage\ uuid\:\ %s = 错误的镜像服务器uuid: {0} +missing\ 'remoteRetentionType'\ in\ job\ parameters = 作业参数中缺少“ RemoteRetentionType ” +missing\ 'remoteRetentionValue'\ in\ job\ parameters = 作业参数中缺少“ RemoteRetentionValue ” +missing\ job\ parameters = 缺少parameters参数 +No\ available\ backup\ storage\ found,\ skip\ this\ job = 找不到可用的备份存储,请跳过此作业 +database\ backup[uuid%s]\ has\ not\ been\ exported\ from\ backupStorage[uuid\:%s] = 数据库备份[uuid{0}]尚未从备份存储[uuid:{1}]中导出 +database\ backup[uuid%s]\ has\ been\ exported\ from\ backupStorage[uuid\:%s] = 数据库备份[uuid{0}]已从备份存储[uuid:{1}]中导出 +do\ not\ allow\ cover\ database\ from\ backup = 不允许从备份中覆盖数据库 +installPath\ and\ bsUrl\ are\ both\ need = InstallPath和BSURL都是必需 +databaseBackup[uuid\:%s]\ is\ not\ Enabled\ and\ Ready = DatabaseBackup[uuid:{0}]未启用且未就绪 +illegal\ url[%s],\ correct\ example\ is\ ssh\://username\:password@hostname[\:sshPort]/path = 非法URL[{0}],正确示例为SSH://username:password@hostname[:sshport]/path +database\ backup[uuid\:%s]\ is\ not\ Enabled\ and\ Ready = 数据库备份[uuid:{0}]未启用且未就绪 +One\ of\ the\ backup\ storage[uuids\:\ %s,\ %s]\ is\ in\ the\ state\ of\ %s,\ can\ not\ do\ sync\ operation = 镜像服务器[uuids: {0}, {1}]之一处于状态{2}, 无法执行同步操作 +database\ backup[uuid\:%s]\ not\ found\ in\ backup\ storage[uuid\:%s] = 未在备份存储[uuid:{1}]中找到数据库备份[uuid:{0}] +sync\ task\ 
failed. = 同步失败 +unexpected\ task\ status\:\ %s = 错误的任务状态{0} +database\ backup\ [uuid\:%s]\ is\ not\ existed\ yet = 数据库备份[uuid:{0}]尚不存在 +backup\ storage[uuid\:%s]\ is\ not\ enabled\ and\ connected = 备份存储[uuid:{0}]未启用和连接 +not\ pass\ the\ restore\ security\ check\:\\n%s = 未通过还原安全检查:\\n{0} +cannot\ get\ free\ port\ to\ listen = 无法获取空闲端口以进行侦听 +database\ backup\ version[%s]\ is\ not\ match\ currently\ version[%s] = 数据库备份版本[{0}]与当前版本[{1}]不匹配 +cannot\ ssh\ peer\ node\ via\ sshkey,\ please\ check\ connection = 无法通过SSHKEY进行SSH对等节点,请检查连接 +please\ stop\ other\ node\ first! = 请先停止其他节点! +current\ backup\ storage\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s],\ allowed\ states\ are\ %s = 当前镜像服务器状态[{0}]不能处理消息[{1}],仅当镜像服务器处于{2}时才能处理该消息 +Unexpected\ backup\ storage[type\:%s,uuid\:%s] = 错误的镜像服务器[type:{0}, uuid:{1}] +Can\ not\ create\ volume\ backup\ for\ shareable\ volume[uuid\:%s] = 无法给共享云盘[uuid:{0}]创建云盘备份 +Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ it\ is\ not\ attached\ to\ any\ vm = 无法给云盘[uuid:{0}]创建云盘备份,因为它未加载到虚拟机上 +Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ the\ vm\ is\ not\ in\ state[%s,\ %s] = 无法给云盘[uuid:{0}]创建云盘备份,因为加载到的虚拟机并不处于以下状态[{1}, {2}] +Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ its\ attached\ volume\ is\ not\ in\ state[%s,\ %s] = 无法给云盘[uuid:{0}]创建云盘备份,因为加载到的虚拟机并不处于以下状态[{1}, {2}] +Volume[uuid\:%s]\ is\ not\ root\ volume = 卷[uuid:{0}]不是根卷 +Failed\ to\ create\ backups\ for\ VM[uuid\:%s],\ because\ it\ is\ not\ in\ state[%s,\ %s] = 无法为VM[uuid:{0}]创建备份,因为它未处于状态[{1},{2}] +The\ resource[uuid\:\ %s]\ has\ already\ created\ a\ cdp\ task,\ cannot\ create\ the\ backup\ job\ at\ the\ same\ time. 
= 资源[uuid:{0}]已创建CDP任务,无法同时创建备份作业。 +No\ volume\ backup\ found\ for\ group\ uuid\:\ %s = 未找到组uuid为{0}的卷备份 +root\ volume\ backup\ of\ group[uuid\:%s]\ not\ found = 未找到组[uuid:{0}]的根卷备份 +Current\ vm[uuid\:\ %s]\ of\ the\ volume[uuid\:\ %s]\ is\ no\ longer\ the\ vm[uuid\:\ %s]\ that\ was\ used\ for\ backup = 卷[uuid:{1}]的当前虚拟机[uuid:{0}]不再是用于备份的虚拟机[UuId:{2}] +instanceOfferingUuid\ or\ cpuNum\ and\ memorySize\ must\ be\ set = +cannot\ specify\ primary\ storage\ which\ attached\ different\ cluster. = 无法指定连接到其他集群的主存储。 +cannot\ create\ vm\ from\ volume\ backup[uuid\:%s]\ which\ is\ not\ root\ volume\ backup = +volume\ backup[uuid\:%s]\ is\ in\ state\ %s,\ cannot\ revert\ volume\ to\ it = 云盘备份[uuid:{0}]处于{1}状态,无法用于恢复云盘 +original\ volume\ for\ backup[uuid\:%s]\ has\ been\ deleted,\ cannot\ revert\ volume\ to\ it = 云盘备份[uuid:{0}]已经被删除,无法用于恢复云盘 +original\ volume[uuid\:%s]\ for\ backup[uuid\:%s]\ is\ no\ longer\ attached\ to\ vm[uuid\:%s] = 原始卷[uuid:{0}](用于备份[uuid:{1}])不再连接到虚拟机[uuid:{2}] +VM\ not\ found\ with\ volume\ backup[uuid\:%s] = 找不到和云盘备份[uuid:{0}]对应的虚拟机 +VM\ is\ not\ in\ stopped\ state\:\ %s = 当前虚拟机状态并不是停止状态:{0} +No\ available\ backup\ storage\ found = 没有可用的镜像服务器 +cannot\ find\ volume\ backup[uuid\:%s] = +the\ cluster\ of\ vm[%s]\ is\ not\ in\ the\ same\ cluster\ as\ the\ primaryStorage[%s] = +Operation\ not\ supported\ on\ shared\ volume = 共享云盘不支持该操作 +No\ VM\ found\ for\ volume[uuid\:%s] = 找不到和云盘[uuid:{0}]对应的虚拟机 +No\ VM\ found\ with\ root\ volume\ uuid\:\ %s = 找不到根卷uuid为{0}的虚拟机 +failed\ to\ create\ image\ from\ backup\ %s = 无法从备份{0}创建镜像 +sync\ volume\ backup\ metadata\ file\ in\ image\ store[uuid\:%s]\ meet\ I/O\ error\:\ %s = 同步卷备份元数据文件(位于镜像存储[uuid:{0}]中)遇到I/O错误:{1} +Current\ vm[uuid\:\ %s]\ of\ the\ backup\ volume\ is\ no\ longer\ the\ vm[uuid\:\ %s]\ that\ was\ used\ for\ backup = 备份卷的当前虚拟机[uuid:{0}]不再是用于备份的虚拟机[uuid:{1}] +No\ VolumeBackupFactory\ of\ type[%s]\ found = 未找到类型为[{0}]的VolumeBackupFactory +One\ of\ the\ backup\ storage[uuid\:\ %s]\ is\ in\ the\ state\ of\ 
%s,\ can\ not\ do\ sync\ operation = 镜像服务器[uuid: {0}]处于状态{1}, 无法执行同步操作 +Volume\ backup[uuid\:%s]\ not\ found\ on\ backup\ storage[uuid\:%s] = 在镜像服务器[uuid:{1}]上找不到云盘备份[uuid:{0}] +volume\ backup[uuid\:%s]\ not\ found\ in\ backup\ storage[uuid\:%s] = 在镜像服务器[uuid:{1}]上找不到云盘备份[uuid:{0}] +No\ volume\ backups\ found\ with\ group\ uuid\:\ %s = 未找到组uuid为{0}的卷备份 +Root\ volume\ missing\ within\ group\ uuid\:\ %s = 组uuid中缺少根卷:{0} +Multiple\ root\ volumes\ found\ within\ group\ uuid\:\ %s = 在组uuid中找到多个根卷:{0} +No\ permission\ to\ volume\ backups\ within\ group\ uuid\:\ %s = 对组uuid{0}中的卷备份没有权限 +Volume\ backup[uuid\:%s]\ not\ found\ on\ any\ backup\ storage = 未在任何备份存储上找到卷备份[uuid:{0}] +degree\ [%s]\ should\ be\ a\ positive\ number = 度[{0}]应为正数 +invalid\ type[%s],\ should\ be\ [nfs,\ sshfs,\ nbd] = 类型[{0}]无效,应为[NFS,sshfs,NBD] +invalid\ url[%s],\ should\ be\ hostname\:/path = URL[{0}]无效,应为hostname:/path +volume\ backup\ metadata\ operation\ failure,\ because\ %s = 卷备份元数据操作失败,原因是{0} # In Module: vpc Network\ [uuid\:\ %s]\ does't\ not\ have\ IPsec\ service = 网络[uuid: {0}]没有IPsec服务 @@ -3753,6 +3829,8 @@ ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ ip\ exist\ in\ local\ vtep = %s\:is\ not\ ipv4 = vxlan\ vtep\ address\ for\ host\ [uuid\ \:\ %s]\ and\ pool\ [uuid\ \:\ %s]\ pair\ already\ existed = 物理机[uuid : {0}]在vxlan资源池[uuid : {1}]中隧道端点地址已经配置 +# In Module: woodpecker + # In Module: xdragon xdragon\ host\ not\ support\ create\ vm\ using\ an\ iso\ image. 
= 神龙服务器不支持使用ISO镜像创建虚拟机。 @@ -3907,75 +3985,3 @@ GreaterThan = 大于 LessThan = 小于 LessThanOrEqualTo = 小于等于 resource[%s]\ doesn't\ support\ zwatch\ return\ with\ clause = 资源[{0}]不支持ZWatch Return WITH子句 - -# In Module: volumebackup -bandWidth\ must\ be\ a\ positive\ number = 带宽必须为正数 -missing\ 'retentionType'\ in\ job\ parameters = parameters中缺少retentionType参数 -missing\ 'retentionValue'\ in\ job\ parameters = parameter中缺少retentionValue参数 -missing\ 'backupStorageUuids'\ in\ job\ parameters = parameter中缺少backupStorageUuids参数 -job\ parameter\ 'backupStorageUuids'\ is\ empty = parameter中backupStorageUuids为空 -unexpected\ backup\ storage\ uuid\:\ %s = 错误的镜像服务器uuid: {0} -missing\ 'remoteRetentionType'\ in\ job\ parameters = 作业参数中缺少“ RemoteRetentionType ” -missing\ 'remoteRetentionValue'\ in\ job\ parameters = 作业参数中缺少“ RemoteRetentionValue ” -missing\ job\ parameters = 缺少parameters参数 -No\ available\ backup\ storage\ found,\ skip\ this\ job = 找不到可用的备份存储,请跳过此作业 -database\ backup[uuid%s]\ has\ not\ been\ exported\ from\ backupStorage[uuid\:%s] = 数据库备份[uuid{0}]尚未从备份存储[uuid:{1}]中导出 -database\ backup[uuid%s]\ has\ been\ exported\ from\ backupStorage[uuid\:%s] = 数据库备份[uuid{0}]已从备份存储[uuid:{1}]中导出 -do\ not\ allow\ cover\ database\ from\ backup = 不允许从备份中覆盖数据库 -installPath\ and\ bsUrl\ are\ both\ need = InstallPath和BSURL都是必需 -databaseBackup[uuid\:%s]\ is\ not\ Enabled\ and\ Ready = DatabaseBackup[uuid:{0}]未启用且未就绪 -illegal\ url[%s],\ correct\ example\ is\ ssh\://username\:password@hostname[\:sshPort]/path = 非法URL[{0}],正确示例为SSH://username:password@hostname[:sshport]/path -database\ backup[uuid\:%s]\ is\ not\ Enabled\ and\ Ready = 数据库备份[uuid:{0}]未启用且未就绪 -One\ of\ the\ backup\ storage[uuids\:\ %s,\ %s]\ is\ in\ the\ state\ of\ %s,\ can\ not\ do\ sync\ operation = 镜像服务器[uuid: {0}]处于状态{1}, 无法执行同步操作 -database\ backup[uuid\:%s]\ not\ found\ in\ backup\ storage[uuid\:%s] = 未在备份存储[uuid:{1}]中找到数据库备份[uuid:{0}] -sync\ task\ failed. 
= 同步失败 -unexpected\ task\ status\:\ %s = 错误的任务状态{0} -database\ backup\ [uuid\:%s]\ is\ not\ existed\ yet = 数据库备份[uuid:{0}]尚不存在 -backup\ storage[uuid\:%s]\ is\ not\ enabled\ and\ connected = 备份存储[uuid:{0}]未启用和连接 -not\ pass\ the\ restore\ security\ check\:\\n%s = 未通过还原安全检查:\\n{0} -cannot\ get\ free\ port\ to\ listen = 无法获取空闲端口以进行侦听 -database\ backup\ version[%s]\ is\ not\ match\ currently\ version[%s] = 数据库备份版本[{0}]与当前版本[{1}]不匹配 -cannot\ ssh\ peer\ node\ via\ sshkey,\ please\ check\ connection = 无法通过SSHKEY进行SSH对等节点,请检查连接 -please\ stop\ other\ node\ first! = 请先停止其他节点! -current\ backup\ storage\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s],\ allowed\ states\ are\ %s = 当前镜像服务器状态[{0}]不能处理消息[{1}],仅当镜像服务器处于{2}时才能处理该消息 -Unexpected\ backup\ storage[type\:%s,uuid\:%s] = 错误的镜像服务器[type:{0}, uuid:{1}] -Can\ not\ create\ volume\ backup\ for\ shareable\ volume[uuid\:%s] = 无法给共享云盘[uuid:{0}]创建云盘备份 -Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ it\ is\ not\ attached\ to\ any\ vm = 无法给云盘[uuid:{0}]创建云盘备份,因为它未加载到虚拟机上 -Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ the\ vm\ is\ not\ in\ state[%s,\ %s] = 无法给云盘[uuid:{0}]创建云盘备份,因为加载到的虚拟机并不处于以下状态[{1}, {2}] -Failed\ to\ create\ volume\ backup\ for\ volume[uuid\:%s],\ because\ its\ attached\ volume\ is\ not\ in\ state[%s,\ %s] = 无法给云盘[uuid:{0}]创建云盘备份,因为加载到的虚拟机并不处于以下状态[{1}, {2}] -Volume[uuid\:%s]\ is\ not\ root\ volume = 卷[uuid:{0}]不是根卷 -Failed\ to\ create\ backups\ for\ VM[uuid\:%s],\ because\ it\ is\ not\ in\ state[%s,\ %s] = 无法为VM[uuid:{0}]创建备份,因为它未处于状态[{1},{2}] -The\ resource[uuid\:\ %s]\ has\ already\ created\ a\ cdp\ task,\ cannot\ create\ the\ backup\ job\ at\ the\ same\ time. 
= 资源[uuid:{0}]已创建CDP任务,无法同时创建备份作业。 -No\ volume\ backup\ found\ for\ group\ uuid\:\ %s = 未找到组uuid为{0}的卷备份 -root\ volume\ backup\ of\ group[uuid\:%s]\ not\ found = 未找到组[uuid:{0}]的根卷备份 -Current\ vm[uuid\:\ %s]\ of\ the\ volume[uuid\:\ %s]\ is\ no\ longer\ the\ vm[uuid\:\ %s]\ that\ was\ used\ for\ backup = 卷[uuid:{1}]的当前虚拟机[uuid:{0}]不再是用于备份的虚拟机[UuId:{2}] -instanceOfferingUuid\ or\ cpuNum\ and\ memorySize\ must\ be\ set = -cannot\ specify\ primary\ storage\ which\ attached\ different\ cluster. = 无法指定连接到其他集群的主存储。 -cannot\ create\ vm\ from\ volume\ backup[uuid\:%s]\ which\ is\ not\ root\ volume\ backup = -volume\ backup[uuid\:%s]\ is\ in\ state\ %s,\ cannot\ revert\ volume\ to\ it = 云盘备份[uuid:{0}]处于{1}状态,无法用于恢复云盘 -original\ volume\ for\ backup[uuid\:%s]\ has\ been\ deleted,\ cannot\ revert\ volume\ to\ it = 云盘备份[uuid:{0}]已经被删除,无法用于恢复云盘 -original\ volume[uuid\:%s]\ for\ backup[uuid\:%s]\ is\ no\ longer\ attached\ to\ vm[uuid\:%s] = 原始卷[uuid:{0}](用于备份[uuid:{1}])不再连接到虚拟机[uuid:{2}] -VM\ not\ found\ with\ volume\ backup[uuid\:%s] = 找不到和云盘备份[uuid:{0}]对应的虚拟机 -VM\ is\ not\ in\ stopped\ state\:\ %s = 当前虚拟机状态并不是停止状态:{0} -No\ available\ backup\ storage\ found = 没有可用的镜像服务器 -cannot\ find\ volume\ backup[uuid\:%s] = -the\ cluster\ of\ vm[%s]\ is\ not\ in\ the\ same\ cluster\ as\ the\ primaryStorage[%s] = -Operation\ not\ supported\ on\ shared\ volume = 共享云盘不支持该操作 -No\ VM\ found\ for\ volume[uuid\:%s] = 找不到和云盘[uuid:{0}]对应的虚拟机 -No\ VM\ found\ with\ root\ volume\ uuid\:\ %s = 找不到根卷uuid为{0}的虚拟机 -failed\ to\ create\ image\ from\ backup\ %s = 无法从备份{0}创建镜像 -sync\ volume\ backup\ metadata\ file\ in\ image\ store[uuid\:%s]\ meet\ I/O\ error\:\ %s = 同步卷备份元数据文件(位于镜像存储[uuid:{0}]中)遇到I/O错误:{1} -Current\ vm[uuid\:\ %s]\ of\ the\ backup\ volume\ is\ no\ longer\ the\ vm[uuid\:\ %s]\ that\ was\ used\ for\ backup = 备份卷的当前虚拟机[uuid:{0}]不再是用于备份的虚拟机[uuid:{1}] -No\ VolumeBackupFactory\ of\ type[%s]\ found = 未找到类型为[{0}]的VolumeBackupFactory -One\ of\ the\ backup\ storage[uuid\:\ %s]\ is\ in\ the\ state\ of\ 
%s,\ can\ not\ do\ sync\ operation = 镜像服务器[uuid: {0}]处于状态{1}, 无法执行同步操作 -Volume\ backup[uuid\:%s]\ not\ found\ on\ backup\ storage[uuid\:%s] = 在镜像服务器[uuid:{1}]上找不到云盘备份[uuid:{0}] -volume\ backup[uuid\:%s]\ not\ found\ in\ backup\ storage[uuid\:%s] = 在镜像服务器[uuid:{1}]上找不到云盘备份[uuid:{0}] -No\ volume\ backups\ found\ with\ group\ uuid\:\ %s = 未找到组uuid为{0}的卷备份 -Root\ volume\ missing\ within\ group\ uuid\:\ %s = 组uuid中缺少根卷:{0} -Multiple\ root\ volumes\ found\ within\ group\ uuid\:\ %s = 在组uuid中找到多个根卷:{0} -No\ permission\ to\ volume\ backups\ within\ group\ uuid\:\ %s = 对组uuid{0}中的卷备份没有权限 -Volume\ backup[uuid\:%s]\ not\ found\ on\ any\ backup\ storage = 未在任何备份存储上找到卷备份[uuid:{0}] -degree\ [%s]\ should\ be\ a\ positive\ number = 度[{0}]应为正数 -invalid\ type[%s],\ should\ be\ [nfs,\ sshfs,\ nbd] = 类型[{0}]无效,应为[NFS,sshfs,NBD] -invalid\ url[%s],\ should\ be\ hostname\:/path = URL[{0}]无效,应为hostname:/path -volume\ backup\ metadata\ operation\ failure,\ because\ %s = 卷备份元数据操作失败,原因是{0} diff --git a/conf/i18n_json/i18n_cdp.json b/conf/i18n_json/i18n_cdp.json index ba524769908..b84cb12d431 100644 --- a/conf/i18n_json/i18n_cdp.json +++ b/conf/i18n_json/i18n_cdp.json @@ -157,15 +157,6 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageApiInterceptor.java" }, - { - "raw": "Unexpected VM state: %s", - "en_US": "Unexpected VM state: {0}", - "zh_CN": "意外的VM状态:{0}", - "arguments": [ - "state" - ], - "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageApiInterceptor.java" - }, { "raw": "Backup storage not found[uuid: %s]", "en_US": "Backup storage not found[uuid: {0}]", @@ -202,15 +193,6 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageApiInterceptor.java" }, - { - "raw": "unexpected task type: %s", - "en_US": "unexpected task type: {0}", - "zh_CN": "意外的任务类型:{0}", - "arguments": [ - "msg.getTaskType()" - ], - "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageApiInterceptor.java" - }, { "raw": "\u0027%s\u0027(%d) should be 
larger than \u0027%s\u0027(%d)", "en_US": "\u0027{0}\u0027({1}) should be larger than \u0027{2}\u0027({3})", @@ -294,15 +276,6 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageImpl.java" }, - { - "raw": "%s", - "en_US": "{0}", - "zh_CN": "{0}", - "arguments": [ - "ret.getError()" - ], - "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageImpl.java" - }, { "raw": "hostname not found for backup storage[uuid: %s]", "en_US": "hostname not found for backup storage[uuid: {0}]", @@ -339,6 +312,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageManagerImpl.java" }, + { + "raw": "%s", + "en_US": "{0}", + "zh_CN": "{0}", + "arguments": [ + "vo.getJobResult()" + ], + "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageManagerImpl.java" + }, { "raw": "create-vm job cancelled", "en_US": "create-vm job cancelled", @@ -391,6 +373,15 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageManagerImpl.java" }, + { + "raw": "Unexpected VM state: %s", + "en_US": "Unexpected VM state: {0}", + "zh_CN": "意外的VM状态:{0}", + "arguments": [ + "state" + ], + "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageManagerImpl.java" + }, { "raw": "No CDP task found for VM: %s", "en_US": "No CDP task found for VM: {0}", @@ -436,6 +427,15 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageManagerImpl.java" }, + { + "raw": "unexpected task type: %s", + "en_US": "unexpected task type: {0}", + "zh_CN": "意外的任务类型:{0}", + "arguments": [ + "taskVO.getTaskType()" + ], + "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageManagerImpl.java" + }, { "raw": "The VM[%s] for volume[%s] is running CDP, cannot resize now.", "en_US": "The VM[{0}] for volume[{1}] is running CDP, cannot resize now.", @@ -464,16 +464,6 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/CreateVmFromCdpBackupLongJob.java" }, - { - "raw": "waiting host[uuid:%s] and 
backupStorage[uuid:%s] to be Connected...", - "en_US": "waiting host[uuid:{0}] and backupStorage[uuid:{1}] to be Connected...", - "zh_CN": "正在等待要连接的主机[uuid:{0}]和备份存储[uuid:{1}]..", - "arguments": [ - "hostUuid", - "bsUuid" - ], - "fileName": "src/main/java/org/zstack/storage/cdp/CreateVmFromCdpBackupLongJob.java" - }, { "raw": "failed to find cdp task[uuid:%s] ", "en_US": "failed to find cdp task[uuid:{0}] ", @@ -501,6 +491,13 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/KvmCdpVmLiveCreateFlowChain.java" }, + { + "raw": "no volume records found from VM backup", + "en_US": "no volume records found from VM backup", + "zh_CN": "未从虚拟机备份中找到卷记录", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/cdp/KvmCdpVmLiveCreateFlowChain.java" + }, { "raw": "multiple root volumes found from CDP backup %s:%d", "en_US": "multiple root volumes found from CDP backup {0}:{1}", @@ -550,13 +547,6 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/KvmCdpVmLiveRestoreFlowChain.java" }, - { - "raw": "no volume records found from VM backup", - "en_US": "no volume records found from VM backup", - "zh_CN": "未从虚拟机备份中找到卷记录", - "arguments": [], - "fileName": "src/main/java/org/zstack/storage/cdp/KvmCdpVmLiveRestoreFlowChain.java" - }, { "raw": "no root volume found from VM backup", "en_US": "no root volume found from VM backup", @@ -636,6 +626,16 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/RevertVmFromCdpBackupMsgLongJob.java" }, + { + "raw": "waiting host[uuid:%s] and backupStorage[uuid:%s] to be Connected...", + "en_US": "waiting host[uuid:{0}] and backupStorage[uuid:{1}] to be Connected...", + "zh_CN": "正在等待要连接的主机[uuid:{0}]和备份存储[uuid:{1}]..", + "arguments": [ + "apiMessage.getHostUuid()", + "apiMessage.getBackupStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/cdp/RevertVmFromCdpBackupMsgLongJob.java" + }, { "raw": "No CDP task found for VM[uuid: %s]", "en_US": "No CDP task found for VM[uuid: {0}]", diff --git 
a/conf/i18n_json/i18n_compute.json b/conf/i18n_json/i18n_compute.json index f6613cd1085..c7b0d69fb9a 100644 --- a/conf/i18n_json/i18n_compute.json +++ b/conf/i18n_json/i18n_compute.json @@ -1840,6 +1840,16 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceHelper.java" }, + { + "raw": "host[uuid:%s] is specified but it is not in zone[uuid:%s], can not create vm from it", + "en_US": "host[uuid:{0}] is specified but it is not in zone[uuid:{1}], can not create vm from it", + "zh_CN": "", + "arguments": [ + "msg.getHostUuid()", + "expectZoneUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceHelper.java" + }, { "raw": "host[uuid:%s] is specified but it\u0027s Disabled, can not create vm from it", "en_US": "host[uuid:{0}] is specified but it\u0027s Disabled, can not create vm from it", diff --git a/conf/i18n_json/i18n_core.json b/conf/i18n_json/i18n_core.json index 8f82d418f56..0e6d2e9561e 100644 --- a/conf/i18n_json/i18n_core.json +++ b/conf/i18n_json/i18n_core.json @@ -15,15 +15,6 @@ "arguments": [], "fileName": "src/main/java/org/zstack/core/Platform.java" }, - { - "raw": "fail to create new File[%s]", - "en_US": "fail to create new File[{0}]", - "zh_CN": "无法创建新文件[{0}]", - "arguments": [ - "invFile" - ], - "fileName": "src/main/java/org/zstack/core/ansible/AnsibleFacadeImpl.java" - }, { "raw": "failed to run ansible: failed to find target host", "en_US": "failed to run ansible: failed to find target host", @@ -45,6 +36,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/core/ansible/AnsibleRunner.java" }, + { + "raw": "fail to create new File[%s]", + "en_US": "fail to create new File[{0}]", + "zh_CN": "无法创建新文件[{0}]", + "arguments": [ + "hostsFile" + ], + "fileName": "src/main/java/org/zstack/core/ansible/PrepareAnsible.java" + }, { "raw": "cannot check md5sum of files in the folder[%s]", "en_US": "cannot check md5sum of files in the folder[{0}]", diff --git a/conf/i18n_json/i18n_crypto.json b/conf/i18n_json/i18n_crypto.json 
index 72fe2bfe366..794e9c8067a 100644 --- a/conf/i18n_json/i18n_crypto.json +++ b/conf/i18n_json/i18n_crypto.json @@ -1,11 +1,4 @@ [ - { - "raw": "the identity authentication does not specify the resource pool to provide the service", - "en_US": "the identity authentication does not specify the resource pool to provide the service", - "zh_CN": "身份认证未指定提供服务的资源池", - "arguments": [], - "fileName": "src/main/java/org/zstack/crypto/auth/AbstractCryptoAuthenticationFacade.java" - }, { "raw": "failed to find model for secretResourcePool [%s]", "en_US": "failed to find model for secretResourcePool [{0}]", @@ -25,13 +18,6 @@ ], "fileName": "src/main/java/org/zstack/crypto/auth/AbstractCryptoAuthenticationFacade.java" }, - { - "raw": "failed to connect remote crypto server", - "en_US": "failed to connect remote crypto server", - "zh_CN": "连接到远程密钥(密评)服务器失败", - "arguments": [], - "fileName": "src/main/java/org/zstack/crypto/auth/AbstractCryptoAuthenticationFacade.java" - }, { "raw": "parseCertificate failed: %s", "en_US": "parseCertificate failed: {0}", @@ -41,20 +27,6 @@ ], "fileName": "src/main/java/org/zstack/crypto/auth/AbstractCryptoAuthenticationFacade.java" }, - { - "raw": "additional authentication server raise an error", - "en_US": "additional authentication server raise an error", - "zh_CN": "", - "arguments": [], - "fileName": "src/main/java/org/zstack/crypto/auth/AbstractCryptoAuthenticationFacade.java" - }, - { - "raw": "additional authentication failed", - "en_US": "additional authentication failed", - "zh_CN": "", - "arguments": [], - "fileName": "src/main/java/org/zstack/crypto/auth/AbstractCryptoAuthenticationFacade.java" - }, { "raw": "failed to parse certificate", "en_US": "failed to parse certificate", @@ -89,6 +61,34 @@ ], "fileName": "src/main/java/org/zstack/crypto/auth/CryptoAuthenticationHelper.java" }, + { + "raw": "the identity authentication does not specify the resource pool to provide the service", + "en_US": "the identity authentication does not 
specify the resource pool to provide the service", + "zh_CN": "身份认证未指定提供服务的资源池", + "arguments": [], + "fileName": "src/main/java/org/zstack/crypto/auth/CryptoAuthenticationManagerImpl.java" + }, + { + "raw": "failed to connect remote crypto server", + "en_US": "failed to connect remote crypto server", + "zh_CN": "连接到远程密钥(密评)服务器失败", + "arguments": [], + "fileName": "src/main/java/org/zstack/crypto/auth/CryptoAuthenticationManagerImpl.java" + }, + { + "raw": "additional authentication server raise an error", + "en_US": "additional authentication server raise an error", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/crypto/auth/CryptoAuthenticationManagerImpl.java" + }, + { + "raw": "additional authentication failed", + "en_US": "additional authentication failed", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/crypto/auth/CryptoAuthenticationManagerImpl.java" + }, { "raw": "failed to generate credential", "en_US": "failed to generate credential", diff --git a/conf/i18n_json/i18n_mevoco.json b/conf/i18n_json/i18n_mevoco.json index 5da7b41d78f..ed4f5876d5b 100644 --- a/conf/i18n_json/i18n_mevoco.json +++ b/conf/i18n_json/i18n_mevoco.json @@ -550,15 +550,6 @@ ], "fileName": "src/main/java/org/zstack/compute/host/MevocoHostBase.java" }, - { - "raw": "host[uuid:%s] can not find", - "en_US": "host[uuid:{0}] can not find", - "zh_CN": "找不到主机[uuid:{0}]", - "arguments": [ - "msg.getHostUuid()" - ], - "fileName": "src/main/java/org/zstack/compute/host/MevocoHostBase.java" - }, { "raw": "kvm host[uuid:%s, name:%s, ip:%s] doesn\u0027t not support live snapshot. please stop vm[uuid:%s] and try again", "en_US": "kvm host[uuid:{0}, name:{1}, ip:{2}] doesn\u0027t not support live snapshot. 
please stop vm[uuid:{3}] and try again", @@ -761,6 +752,15 @@ ], "fileName": "src/main/java/org/zstack/compute/host/MevocoHostManagerImpl.java" }, + { + "raw": "host[uuid:%s] can not find", + "en_US": "host[uuid:{0}] can not find", + "zh_CN": "找不到主机[uuid:{0}]", + "arguments": [ + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/host/MevocoHostManagerImpl.java" + }, { "raw": "ovs cpu pinning resource config:[%s] format error.", "en_US": "ovs cpu pinning resource config:[{0}] format error.", @@ -1622,6 +1622,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" }, + { + "raw": "the vm scheduling group has already had a vms Affinitive to Hosts scheduling policy attached", + "en_US": "the vm scheduling group has already had a vms Affinitive to Hosts scheduling policy attached, you cannot attach a vm antiaffinity from each other scheduling rule to the group again.", + "zh_CN": "该虚拟机调度组已绑定聚集虚拟机调度策略,不可绑定其它虚拟机调度策略", + "arguments": [], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, { "raw": "can not satisfied vm scheduling rule group conditions", "en_US": "can not satisfied vm scheduling rule group conditions", @@ -2065,12 +2072,22 @@ "fileName": "src/main/java/org/zstack/license/LicenseManagerImpl.java" }, { - "raw": "Insufficient CPU number licensed. Your %s license permits the number of CPUs: %d.", - "en_US": "Insufficient CPU number licensed. Your {0} license permits the number of CPUs: {1}.", - "zh_CN": "许可证的 CPU 配额数量不足。您的 {0} 许可证允许的 CPU 数量为 {1}", + "raw": "Insufficient CPU Socket number licensed. Your %s license permits the number of CPU Sockets: %d.", + "en_US": "Insufficient CPU Socket number licensed. 
Your {0} license permits the number of CPU Sockets: {1}.", + "zh_CN": "许可证的 CPU Socket 数配额数量不足。您的 {0} 许可证允许的 CPU Socket 数量为 {1}", "arguments": [ "addon.getModules()", - "addon.getCpuNum()" + "addon.getCpuSocketsNum()" + ], + "fileName": "src/main/java/org/zstack/license/LicenseManagerImpl.java" + }, + { + "raw": "Insufficient CPU Core number licensed. Your %s license permits the number of CPU Cores: %d.", + "en_US": "Insufficient CPU Core number licensed. Your {0} license permits the number of CPU Cores: {1}.", + "zh_CN": "许可证的 CPU 核心数配额数量不足。您的 {0} 许可证允许的 CPU 核心数量为 {1}", + "arguments": [ + "addon.getModules()", + "addon.getCpuCoresNum()" ], "fileName": "src/main/java/org/zstack/license/LicenseManagerImpl.java" }, @@ -2215,8 +2232,19 @@ "zh_CN": "{0} 许可证配额为 {1},已使用 {2} 个 CPU 核数。请上传新的许可证或删除多余的主机", "arguments": [ "licenseType", - "platformLicenseCpu", - "x86HostCpu + otherHostCpu" + "platformLicenseCpuSockets", + "x86HostCpuSockets + otherHostCpuSockets" + ], + "fileName": "src/main/java/org/zstack/license/compute/LicenseCapacity.java" + }, + { + "raw": "hijacked detected. Your license[%s] permits %d CPU cores, but consumed %d. You can either apply a new license or delete additional hosts", + "en_US": "hijacked detected. Your license[{0}] permits {1} CPU cores, but consumed {2}. 
You can either apply a new license or delete additional hosts", + "zh_CN": "", + "arguments": [ + "licenseType", + "platformLicenseCpuCores", + "x86HostCpuCores + otherHostCpuCores" ], "fileName": "src/main/java/org/zstack/license/compute/LicenseCapacity.java" }, diff --git a/conf/i18n_json/i18n_storage.json b/conf/i18n_json/i18n_storage.json index 8fa5afab0fb..f72ad0b8b2d 100644 --- a/conf/i18n_json/i18n_storage.json +++ b/conf/i18n_json/i18n_storage.json @@ -742,16 +742,6 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/volume/DownloadIsoForVmExtension.java" }, - { - "raw": "primary storage uuid conflict, the primary storage specified by the disk offering are %s, and the primary storage specified in the creation parameter is %s", - "en_US": "primary storage uuid conflict, the primary storage specified by the disk offering are {0}, and the primary storage specified in the creation parameter is {1}", - "zh_CN": "主存储组冲突,硬盘规格指定的数据存储组是 {0},而创建参数指定的数据存储组是 {1}", - "arguments": [ - "requiredPrimaryStorageUuids", - "msg.getPrimaryStorageUuid()" - ], - "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" - }, { "raw": "volume[uuid:%s] is not in status Ready, current is %s, can\u0027t create snapshot", "en_US": "volume[uuid:{0}] is not in status Ready, current is {1}, can\u0027t create snapshot", @@ -975,6 +965,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, + { + "raw": "Can not attach volume to vm runs on host[uuid: %s] which is disconnected with volume\u0027s storage[uuid: %s]", + "en_US": "Can not attach volume to vm runs on host[uuid: {0}] which is disconnected with volume\u0027s storage[uuid: {1}]", + "zh_CN": "", + "arguments": [ + "hostUuid", + "volumeVO.getPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" + }, { "raw": "it\u0027s not allowed to backup root volume, uuid:%s", "en_US": "it\u0027s not allowed 
to backup root volume, uuid:{0}", @@ -1106,6 +1106,13 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, + { + "raw": "invalid disk states", + "en_US": "invalid disk states", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" + }, { "raw": "cannot find image cache[imageUuid: %s] for reinit volume", "en_US": "cannot find image cache[imageUuid: {0}] for reinit volume", @@ -1198,6 +1205,16 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeManagerImpl.java" }, + { + "raw": "primary storage uuid conflict, the primary storage specified by the disk offering are %s, and the primary storage specified in the creation parameter is %s", + "en_US": "primary storage uuid conflict, the primary storage specified by the disk offering are {0}, and the primary storage specified in the creation parameter is {1}", + "zh_CN": "主存储组冲突,硬盘规格指定的数据存储组是 {0},而创建参数指定的数据存储组是 {1}", + "arguments": [ + "requiredPrimaryStorageUuids", + "msg.getPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeManagerImpl.java" + }, { "raw": "target volume is expunged during volume creation", "en_US": "target volume is expunged during volume creation", From c48371f2d8e475b2c717e21b354b968beb4bfe6e Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Fri, 23 Jan 2026 14:56:42 +0800 Subject: [PATCH 04/76] [conf]: update i18n files This patch is for zsv_4.10.28 Related: ZSV-10444 Change-Id: I70636878647a7962636a64766b6c67756574756c --- conf/i18n/messages_en_US.properties | 1098 ++++++++++++++-- conf/i18n/messages_zh_CN.properties | 1102 +++++++++++++++-- conf/i18n_json/i18n_aliyun-storage.json | 117 +- conf/i18n_json/i18n_baremetal.json | 48 + conf/i18n_json/i18n_baremetal2.json | 604 +++++++++ .../i18n_json/i18n_block-primary-storage.json | 17 + conf/i18n_json/i18n_cbd.json | 30 + conf/i18n_json/i18n_cdp.json | 26 + conf/i18n_json/i18n_ceph.json | 121 ++ 
conf/i18n_json/i18n_cloudformation.json | 10 + conf/i18n_json/i18n_compute.json | 230 ++++ conf/i18n_json/i18n_console.json | 18 + conf/i18n_json/i18n_core.json | 9 + conf/i18n_json/i18n_crypto.json | 19 + conf/i18n_json/i18n_directory.json | 7 + conf/i18n_json/i18n_eip.json | 25 + conf/i18n_json/i18n_faulttolerance.json | 96 ++ conf/i18n_json/i18n_flatNetworkProvider.json | 18 + conf/i18n_json/i18n_guesttools.json | 2 +- conf/i18n_json/i18n_hybrid.json | 92 ++ conf/i18n_json/i18n_iam2.json | 48 + conf/i18n_json/i18n_identity.json | 38 + conf/i18n_json/i18n_image.json | 10 + conf/i18n_json/i18n_kvm.json | 129 +- conf/i18n_json/i18n_loadBalancer.json | 22 + conf/i18n_json/i18n_localstorage.json | 258 ++++ conf/i18n_json/i18n_mevoco.json | 814 ++++++++++++ conf/i18n_json/i18n_ministorage.json | 70 ++ conf/i18n_json/i18n_network.json | 149 +++ conf/i18n_json/i18n_nfsPrimaryStorage.json | 148 +++ conf/i18n_json/i18n_ovf.json | 20 +- conf/i18n_json/i18n_portForwarding.json | 24 + conf/i18n_json/i18n_portal.json | 9 + conf/i18n_json/i18n_resourceconfig.json | 11 + conf/i18n_json/i18n_rest.json | 10 + conf/i18n_json/i18n_search.json | 11 + conf/i18n_json/i18n_sftpBackupStorage.json | 10 + .../i18n_sharedMountPointPrimaryStorage.json | 70 ++ conf/i18n_json/i18n_sharedblock.json | 230 ++++ conf/i18n_json/i18n_slb.json | 281 +++++ conf/i18n_json/i18n_snmp.json | 54 + .../i18n_software-package-plugin.json | 27 + conf/i18n_json/i18n_storage-device.json | 21 + conf/i18n_json/i18n_storage.json | 260 +++- conf/i18n_json/i18n_tag2.json | 9 + conf/i18n_json/i18n_ticket.json | 7 + .../i18n_json/i18n_virtualRouterProvider.json | 135 ++ conf/i18n_json/i18n_virtualSwitchNetwork.json | 368 ++++++ conf/i18n_json/i18n_volumebackup.json | 17 + conf/i18n_json/i18n_vpc.json | 88 ++ conf/i18n_json/i18n_zbox.json | 20 + conf/i18n_json/i18n_zbs.json | 9 + conf/i18n_json/i18n_zce-x-plugin.json | 16 + conf/i18n_json/i18n_zstone-plugin.json | 9 + conf/i18n_json/i18n_zwatch.json | 10 + 55 files 
changed, 6820 insertions(+), 281 deletions(-) diff --git a/conf/i18n/messages_en_US.properties b/conf/i18n/messages_en_US.properties index 5c78fa135d1..199d8defb37 100755 --- a/conf/i18n/messages_en_US.properties +++ b/conf/i18n/messages_en_US.properties @@ -6,6 +6,16 @@ If\ a\ specified\ Accesskey\ is\ expected,\ the\ AccesskeyId\ and\ the\ Accesske Access\ key\ ID\ and\ secret\ cannot\ contain\ '\:' = Access key ID and secret cannot contain '':'' The\ number\ of\ access\ keys\ for\ account[uuid\=%s]\ has\ exceeded\ the\ maximum\ limit = The number of access keys for account[uuid={0}] has exceeded the maximum limit +# In Module: account-import +unable\ to\ support\ third\ party\ account\ source\ of\ type[%s] = unable to support third party account source of type[{0}] +third\ party\ user[credentials\=%s]\ has\ already\ binding\ to\ other\ account = third party user[credentials={0}] has already binding to other account +invalid\ account\ spec\:\ accountUuid\ is\ null = invalid account spec: accountUuid is null +invalid\ account\ spec\:\ failed\ to\ find\ account[uuid\=%s] = invalid account spec: failed to find account[uuid={0}] +account[uuid\=%s]\ has\ already\ binding\ other\ third\ party\ source = account[uuid={0}] has already binding other third party source +failed\ to\ import\ account\ from\ source[uuid\=%s,\ type\=%s] = failed to import account from source[uuid={0}, type={1}] +failed\ to\ unbinding\ accounts\ from\ source[uuid\=%s,\ type\=%s] = failed to unbinding accounts from source[uuid={0}, type={1}] +failed\ to\ delete\ source[uuid\=%s,\ type\=%s] = failed to delete source[uuid={0}, type={1}] + # In Module: acl not\ support\ the\ ip\ version\ %d = not support the ip version {0} %s\ duplicate/overlap\ ip\ entry\ with\ access-control-list\ group\:%s = {0} duplicate/overlap ip entry with access-control-list group:{1} @@ -21,12 +31,97 @@ domain\ and\ url\ can\ not\ both\ empty = domain and url can not both empty domain[%s]\ is\ not\ validate\ domain = 
domain[{0}] is not validate domain url[%s]\ is\ not\ validate\ url = url[{0}] is not validate url +# In Module: aliyun-storage +accessKey\ and\ keySecret\ must\ be\ set = accessKey and keySecret must be set +ocean\ api\ endpoint\ must\ not\ be\ null = ocean api endpoint must not be null +accessKey\ and\ keySecret\ must\ be\ set! = accessKey and keySecret must be set! +regionId\ must\ be\ set! = regionId must be set! +no\ current\ used\ key/secret\ for\ %s! = no current used key/secret for {0}! +Not\ a\ valid\ message! = Not a valid message! +%s\ failed,\ ErrorCode\:\ %s,\ ErrorMessage\:\ %s = {0} failed, ErrorCode: {1}, ErrorMessage: {2} +Device\ Not\ Ready\ in\ %d\ milli\ seconds = Device Not Ready in {0} milli seconds +snapshot\ task\ cannot\ finished\ in\ %d\ milliseconds,\ now\ progress\ is\ %d,\ status\ is\ %s = snapshot task cannot finished in {0} milliseconds, now progress is {1}, status is {2} +snapshot\ task\ status\ is\ finished\ %s = snapshot task status is finished {0} +not\ supported\ HybridClient = not supported HybridClient +arg\ 'endpoint'\ must\ be\ set\ in\ %s\ type = arg ''endpoint'' must be set in {0} type +not\ supported\ datacenter\ [%s]\ type\ here! = not supported datacenter [{0}] type here! +must\ indicate\ zoneId\ in\ private\ aliyun. = must indicate zoneId in private aliyun. 
+make\ ocean\ api\ signature\ string\ failed\:\ %s = make ocean api signature string failed: {0} +url(ocean\ endpoint)\ must\ be\ set\ for\ aliyun\ ebs\ backupstorage = url(ocean endpoint) must be set for aliyun ebs backupstorage +couldn't\ find\ domain\ on\ such\ oss\:\ [%s] = couldn''t find domain on such oss: [{0}] +aliyun\ ebs\ backup\ storage\ do\ not\ support\ to\ cancel\ download\ image = aliyun ebs backup storage do not support to cancel download image +no\ such\ object\ %s\ found\ in\ bucket\ %s = no such object {0} found in bucket {1} +couldn't\ find\ such\ oss\ bucket\:\ [%s] = couldn''t find such oss bucket: [{0}] +aliyun\ ebs\ backup\ storage\ do\ not\ support\ calculate\ image\ hash = aliyun ebs backup storage do not support calculate image hash +cannot\ delete\ oss\ bucket\ [%s],\ Aliyun\ Ebs\ BackupStorage\ [%s]\ still\ existed,\ please\ delete\ it\ first. = cannot delete oss bucket [{0}], Aliyun Ebs BackupStorage [{1}] still existed, please delete it first. +cannot\ find\ device\ path\ from\ volume\:\ %s = cannot find device path from volume: {0} +aliyun\ ebs\ not\ support\ resize\ on\ running\ vm\ now. = aliyun ebs not support resize on running vm now. +iso\ [%s]\ has\ been\ attached,\ we\ can\ not\ attach\ it\ until\ detach\ it = iso [{0}] has been attached, we can not attach it until detach it +url(ocean\ endpoint)\ must\ be\ set\ for\ aliyun\ ebs\ primarystorage = url(ocean endpoint) must be set for aliyun ebs primarystorage +url\ must\ starts\ with\ http\://\ or\ https\://,\ but\ got\ %s = url must starts with http:// or https://, but got {0} +panguPartitionUuid\ or\ identityZoneUuid\ must\ be\ set. = panguPartitionUuid or identityZoneUuid must be set. 
+panguPartitionUuid\ [%s]\ not\ be\ matched\ with\ identityZoneUuid\ [%s] = panguPartitionUuid [{0}] not be matched with identityZoneUuid [{1}] +the\ aliyun\ ebs\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = the aliyun ebs primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for instantiating the volume +cannot\ find\ snapshot\ from\ image\:\ %s,\ maybe\ the\ image\ has\ been\ deleted = cannot find snapshot from image: {0}, maybe the image has been deleted +ebs\ primarystorage\ cannot\ support\ decrease\ size\ now = ebs primarystorage cannot support decrease size now +create\ snapshot\ timeout,\ progress\ is\ %d = create snapshot timeout, progress is {0} +couldn’t\ find\ any\ BackupStorage\ that\ is\ connected\ and\ enabled\ for\ commiting\ volume\ [uuid\:%s] = couldn’t find any BackupStorage that is connected and enabled for commiting volume [uuid:{0}] +aliyun\ ebs\ primarystorage\ only\ support\ aliyun\ ebs\ bs,\ actually\ get\ type\:\ %s = aliyun ebs primarystorage only support aliyun ebs bs, actually get type: {0} +cannot\ delete\ identity\ zone\ [%s],\ Aliyun\ Ebs\ PrimaryStorage\ [%s]\ still\ existed,\ please\ delete\ it\ first. = cannot delete identity zone [{0}], Aliyun Ebs PrimaryStorage [{1}] still existed, please delete it first. 
+append\ volumeId\:\ %s,\ but\ another\ volumeId\ existed\ in\ url\:\ %s = append volumeId: {0}, but another volumeId existed in url: {1} +hostUuid\ [%s]\ already\ existed\ in\ url\:\ %s = hostUuid [{0}] already existed in url: {1} +invalid\ install\ url\:\ %s = invalid install url: {0} +cannot\ find\ devicePath\ on\ host\:\ %s = cannot find devicePath on host: {0} +invalid\ install\ path\:\ %s = invalid install path: {0} +invalid\ snapshot\ install\ path\:\ %s = invalid snapshot install path: {0} +PrimaryStorage\ [%s]\ still\ running,\ can\ not\ delete\ access\ group = PrimaryStorage [{0}] still running, can not delete access group +access\ group\ rule\ [%s]\ already\ existed\ in\ access\ group\ [%s] = access group rule [{0}] already existed in access group [{1}] +access\ group\ [%s]\ already\ existed\ in\ datacenter\ [%s] = access group [{0}] already existed in datacenter [{1}] +no\ filesystem\ [%s]\ found\ in\ region\:\ %s = no filesystem [{0}] found in region: {1} +nas\ filesystem\ existed\ in\ datacenter\:\ %s = nas filesystem existed in datacenter: {0} +some\ primary\ storage\ [%s]\ used\ this\ nas,\ can\ not\ delete\ it\ until\ delete\ the\ primary\ storage. = some primary storage [{0}] used this nas, can not delete it until delete the primary storage. +mount\ domain\ not\ valid\ after\ %d\ milliseconds,\ delete\ it... = mount domain not valid after {0} milliseconds, delete it... 
+no\ such\ mount\ target\ [%s]\ in\ nas\:\ %s = no such mount target [{0}] in nas: {1} +there\ are\ no\ nas\ access\ group\ existed,\ please\ create\ at\ least\ one = there are no nas access group existed, please create at least one +nas\ mount\ target\ [%s]\ existed\ in\ filesystem\:\ %s = nas mount target [{0}] existed in filesystem: {1} +the\ access\ group\ attached\ is\ already\:\ %s = the access group attached is already: {0} +no\ connected\ host\ found\ in\ the\ cluster[uuid\:%s] = no connected host found in the cluster[uuid:{0}] +AliyunNasAccessGroupVO[%s]\ is\ not\ existed,\ may\ be\ it\ has\ been\ deleted! = AliyunNasAccessGroupVO[{0}] is not existed, maybe it has been deleted! +EcsVSwitchVO[%s]\ is\ not\ existed,\ may\ be\ it\ has\ been\ deleted! = EcsVSwitchVO[{0}] is not existed, maybe it has been deleted! +cannot\ find\ an\ available\ host\ to\ operation\ in\ primary\ storage\:\ %s = cannot find an available host to operate in primary storage: {0} +failed\ to\ ping\ aliyun\ nas\ primary\ storage[uuid\:%s]\ from\ host[uuid\:%s],because\ %s.\ disconnect\ this\ host-ps\ connection = failed to ping aliyun nas primary storage[uuid:{0}] from host[uuid:{1}], because {2}. disconnect this host-ps connection +operation\ error,\ because\:%s = operation error, because:{0} +nas\ primary\ storage\ not\ mounted,\ please\ init\ it\ first! = nas primary storage not mounted, please init it first! +cannot\ find\ any\ BackupStorageKvmFactory\ for\ the\ type[%s] = cannot find any BackupStorageKvmFactory for the type[{0}] +cannot\ find\ host\ to\ operate\ volume\:\ [%s] = cannot find host to operate volume: [{0}] +cannot\ find\ and\ host\ to\ sync\ volume\ size\ in\ primary\:\ %s = cannot find any host to sync volume size in primary: {0} +image\ [%s]\ has\ been\ deleted,\ cannot\ reinit\ root\ volume\ from\ it = image [{0}] has been deleted, cannot reinit root volume from it +no\ available\ host\ could\ check\ mountPath! = no available host could check mountPath! 
+unable\ to\ allocate\ backup\ storage\ specified\ by\ uuids\:\ %s,\ becasue\:\ %s = unable to allocate backup storage specified by uuids: {0}, because: {1} +No\ backup\ storage\ to\ commit\ volume\ [uuid\:\ %s] = No backup storage to commit volume [uuid: {0}] +aliyun\ nas\ primarystorage\ only\ support\ imagestore\ bs,\ actually\ get\ type\:\ %s = aliyun nas primarystorage only support imagestore bs, actually get type: {0} +unable\ to\ commit\ backup\ storage\ specified\ by\ uuids\:\ %s,\ becasue\:\ %s = unable to commit backup storage specified by uuids: {0}, because: {1} +image\ [uuid\:%s]\ has\ been\ deleted = image [uuid:{0}] has been deleted +the\ volume[uuid;%s]\ is\ attached\ to\ a\ VM[uuid\:%s]\ which\ is\ in\ state\ of\ %s,\ cannot\ do\ the\ snapshot\ merge = the volume[uuid:{0}] is attached to a VM[uuid:{1}] which is in state of {2}, cannot do the snapshot merge +unable\ to\ attach\ a\ primary\ storage\ to\ cluster.\ Kvm\ host[uuid\:%s,\ name\:%s]\ in\ cluster\ has\ qemu-img\ with\ version[%s];\ but\ the\ primary\ storage\ has\ attached\ to\ a\ cluster\ that\ has\ kvm\ host[uuid\:%s],\ which\ has\ qemu-img\ with\ version[%s].\ qemu-img\ version\ greater\ than\ %s\ is\ incompatible\ with\ versions\ less\ than\ %s,\ this\ will\ causes\ volume\ snapshot\ operation\ to\ fail.\ Please\ avoid\ attaching\ a\ primary\ storage\ to\ clusters\ that\ have\ different\ Linux\ distributions,\ in\ order\ to\ prevent\ qemu-img\ version\ mismatch = unable to attach a primary storage to cluster. Kvm host[uuid:{0}, name:{1}] in cluster has qemu-img with version[{2}]; but the primary storage has attached to a cluster that has kvm host[uuid:{3}], which has qemu-img with version[{4}]. qemu-img version greater than {5} is incompatible with versions less than {6}, this will cause volume snapshot operation to fail. 
Please avoid attaching a primary storage to clusters that have different Linux distributions, in order to prevent qemu-img version mismatch +no\ available\ host\ could\ download\ imagecache! = no available host could download imagecache! +the\ aliyun\ nas\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = the aliyun nas primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for instantiating the volume +the\ aliyun\ nas\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ delete\ bits\ on\ primarystorage = the aliyun nas primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for delete bits on primarystorage +not\ support = not support +the\ AliyunNAS\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = the AliyunNAS primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected +the\ Aliyun\ Nas\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = the Aliyun Nas primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected +failed\ to\ check\ mount\ path\ on\ host\:\ %s = failed to check mount path on host: {0} +cannot\ find\ a\ host\ to\ cleanup\ image\ cache. = cannot find a host to cleanup image cache. +resource[uuid\:\ %s]\ cannot\ found = resource[uuid: {0}] cannot found +cannot\ find\ available\ host\ for\ operation\ on\ primary\ storage[uuid\:%s]. = cannot find available host for operation on primary storage[uuid:{0}]. +host\ where\ vm[uuid\:%s]\ locate\ is\ not\ Connected. = host where vm[uuid:{0}] locate is not Connected. 
+appName\:\ %s,\ partitionName\:\ %s\ is\ existed\ in\ identityZone\:\ %s = appName: {0}, partitionName: {1} is existed in identityZone: {2} + # In Module: aliyunproxy # In Module: applianceVm there\ is\ no\ available\ nicType\ on\ L3\ network\ [%s] = there is no available nicType on L3 network [{0}] appliance\ vm[uuid\:%s]\ is\ in\ status\ of\ %s\ that\ cannot\ make\ http\ call\ to\ %s = appliance vm[uuid:{0}] is in status of {1} that cannot make http call to {2} -operation\ error,\ because\:%s = operation error, because:{0} appliance\ vm\ %s\ stopped = appliance vm {0} stopped appliance\ vm\ %s\ reboot = appliance vm {0} reboot appliance\ vm\ %s\ reboot\ failed = appliance vm {0} reboot failed @@ -78,6 +173,7 @@ AutoScalingRuleVO[uuid\:%s]\ is\ %s,\ state\ change\ is\ not\ allowed = AutoScal # In Module: baremetal IPMI\ Address\ %s\ is\ not\ valid = IPMI Address {0} is not valid +Failed\ to\ reach\ the\ bare-metal\ chassis,\ please\ make\ sure\:\ 1.\ the\ IPMI\ connection\ is\ active;\ 2.\ the\ IPMI\ Address,\ Port,\ Username\ and\ Password\ are\ correct;\ 3.\ IPMI\ Over\ LAN\ is\ enabled\ in\ BIOS. = Failed to reach the bare-metal chassis, please make sure: 1. the IPMI connection is active; 2. the IPMI Address, Port, Username and Password are correct; 3. IPMI Over LAN is enabled in BIOS. Baremetal\ Chassis\ of\ IPMI\ address\ %s\ and\ IPMI\ port\ %d\ has\ already\ been\ created. = Baremetal Chassis of IPMI address {0} and IPMI port {1} has already been created. Cluster[uuid\:%s]\ does\ not\ exists. = Cluster[uuid:{0}] does not exists. Cluster[uuid\:%s]\ is\ not\ a\ baremetal\ cluster. = Cluster[uuid:{0}] is not a baremetal cluster. @@ -86,6 +182,8 @@ IPMI\ Address\ and\ Port\ %s\:%d\ already\ exists. 
= IPMI Address and Port {0}:{ no\ usable\ baremetal\ pxeserver\ attached\ to\ cluster[uuid\:%s] = no usable baremetal pxeserver attached to cluster[uuid:{0}] baremetal\ chassis[uuid\:%s]\ is\ supposed\ to\ using\ pxeserver[uuid\:%s],\ but\ it\ was\ pxeserver[uuid\:%s]\ that\ actually\ handled\ the\ DHCP\ request = baremetal chassis[uuid:{0}] is supposed to using pxeserver[uuid:{1}], but it was pxeserver[uuid:{2}] that actually handled the DHCP request License\ not\ found,\ please\ apply\ addon\ license\ for\ product\ baremetal. = License not found, please apply addon license for product baremetal. +Hijacked\ detected.\ Your\ license[%s]\ permits\ %s\ baremetal\ chassis,\ but\ we\ detect\ there\ are\ %s\ in\ the\ database.\ You\ can\ either\ delete\ additional\ chassis\ or\ apply\ a\ new\ license. = Hijacked detected. Your license[{0}] permits {1} baremetal chassis, but we detect there are {2} in the database. You can either delete additional chassis or apply a new license. +Insufficient\ baremetal\ chassis\ number\ licensed.\ You\ can\ either\ delete\ additional\ chassis\ or\ apply\ a\ new\ license. = Insufficient baremetal chassis number licensed. You can either delete additional chassis or apply a new license. 
failed\ to\ delete\ baremetal\ chassis\ %s = failed to delete baremetal chassis {0} Failed\ to\ remotely\ power\ on\ baremetal\ chassis[uuid\:%s] = Failed to remotely power on baremetal chassis[uuid:{0}] Failed\ to\ remotely\ power\ reset\ baremetal\ chassis[uuid\:%s] = Failed to remotely power reset baremetal chassis[uuid:{0}] @@ -135,6 +233,7 @@ PXE\ Server\ DHCP\ Interface\ %s\ does\ not\ exists,\ or\ it\ does\ not\ have\ a cluster[uuid\:%s]\ and\ pxeserver[uuid\:%s]\ don't\ belong\ to\ one\ zone = cluster[uuid:{0}] and pxeserver[uuid:{1}] don''t belong to one zone cluster[uuid\:%s]\ is\ not\ baremetal\ cluster = cluster[uuid:{0}] is not baremetal cluster baremetal\ pxeserver[uuid\:%s]\ already\ attached\ to\ cluster[uuid\:%s] = baremetal pxeserver[uuid:{0}] already attached to cluster[uuid:{1}] +baremetal\ pxeserver[uuid\:%s]\ is\ not\ compatible\ with\ baremetal\ instances\ in\ cluster[uuid\:%s],\ existing\ nic\ ip\ %s\ is\ out\ of\ pxeserver\ dhcp\ range\ %s\ ~\ %s. = baremetal pxeserver[uuid:{0}] is not compatible with baremetal instances in cluster[uuid:{1}], existing nic ip {2} is out of pxeserver dhcp range {3} ~ {4}. 
baremetal\ pxeserver[uuid\:\ %s]\ not\ attached\ to\ cluster[uuid\:\ %s] = baremetal pxeserver[uuid: {0}] not attached to cluster[uuid: {1}] failed\ to\ init\ configs\ on\ baremetal\ pxeserver[uuid\:%s] = failed to init configs on baremetal pxeserver[uuid:{0}] failed\ to\ create\ bm\ instance\ configs\ on\ baremetal\ pxeserver[uuid\:%s] = failed to create bm instance configs on baremetal pxeserver[uuid:{0}] @@ -147,6 +246,7 @@ failed\ to\ start\ baremetal\ pxeserver[uuid\:%s] = failed to start baremetal px failed\ to\ stop\ baremetal\ pxeserver[uuid\:%s] = failed to stop baremetal pxeserver[uuid:{0}] failed\ to\ create\ dhcp\ config\ of\ chassis[uuid\:%s]\ on\ pxeserver[uuid\:%s] = failed to create dhcp config of chassis[uuid:{0}] on pxeserver[uuid:{1}] failed\ to\ delete\ dhcp\ config\ of\ chassis[uuid\:%s]\ on\ pxeserver[uuid\:%s] = failed to delete dhcp config of chassis[uuid:{0}] on pxeserver[uuid:{1}] +the\ uuid\ of\ baremtal\ pxeserver\ agent\ changed[expected\:%s,\ actual\:%s],\ it's\ most\ likely\ the\ agent\ was\ manually\ restarted.\ Issue\ a\ reconnect\ to\ sync\ the\ status = the uuid of baremetal pxeserver agent changed[expected:{0}, actual:{1}], it''s most likely the agent was manually restarted. 
Issue a reconnect to sync the status unable\ to\ connect\ to\ baremetal\ pxeserver[url\:%s],\ because\ %s = unable to connect to baremetal pxeserver[url:{0}], because {1} failed\ to\ mount\ baremetal\ cache\ of\ image[uuid\:%s] = failed to mount baremetal cache of image[uuid:{0}] no\ enough\ space\ left\ in\ baremetal\ image\ cache\ for\ image[uuid\:%s] = no enough space left in baremetal image cache for image[uuid:{0}] @@ -156,9 +256,11 @@ unsupported\ backup\ storage\ type\ for\ baremetal = unsupported backup storage # In Module: baremetal2 bond\ name\ %s\ has\ been\ existed = bond name {0} has been existed nic\ with\ mac\:%s\ has\ been\ bonded = nic with mac:{0} has been bonded +cannot\ find\ the\ cluster\ of\ baremetal2\ chassis[uuid\:%s],\ maybe\ it\ doesn't\ exist = cannot find the cluster of baremetal2 chassis[uuid:{0}], maybe it doesn''t exist there\ is\ no\ baremetal2\ gateway\ found\ in\ cluster[uuid\:%s] = there is no baremetal2 gateway found in cluster[uuid:{0}] there\ is\ no\ usable\ baremetal2\ gateway\ found\ in\ cluster[uuid\:%s] = there is no usable baremetal2 gateway found in cluster[uuid:{0}] there\ is\ no\ baremetal2\ provision\ network\ found\ in\ cluster[uuid\:%s] = there is no baremetal2 provision network found in cluster[uuid:{0}] +baremetal2\ provision\ network[uuid\:%s]\ is\ not\ usable,\ make\ sure\ it's\ Enabled = baremetal2 provision network[uuid:{0}] is not usable, make sure it''s Enabled wrong\ baremetal2\ chassis\ hardware\ info\ format\:\ %s = wrong baremetal2 chassis hardware info format: {0} the\ cpu\ architecture\ of\ the\ chassis[arch\:%s]\ and\ the\ cluster[arch\:%s]\ don't\ match = the cpu architecture of the chassis[arch:{0}] and the cluster[arch:{1}] don''t match only\ baremetal2\ chassis\ with\ boot\ mode\ %s\ is\ supported = only baremetal2 chassis with boot mode {0} is supported @@ -168,10 +270,14 @@ wrong\ baremetal2\ chassis\ disk\ hardware\ info\ format\:\ %s = wrong baremetal other\ chassis\ has\ nics\ with\ the\ 
same\ mac\ address,\ which\ is\ impossible = other chassis has nics with the same mac address, which is impossible BareMetal2\ Chassis[uuid\:%s]\ doesn't\ exist\ or\ is\ disabled = BareMetal2 Chassis[uuid:{0}] doesn''t exist or is disabled no\ available\ baremetal2\ chassis\ found = no available baremetal2 chassis found +no\ available\ baremetal2\ chassis\ found\ in\ baremetal2\ clusters[uuids\:%s] = no available baremetal2 chassis found in baremetal2 clusters[uuids:{0}] Cannot\ find\ BareMetal2\ Chassis[uuid\:%s],\ it\ may\ have\ been\ deleted = Cannot find BareMetal2 Chassis[uuid:{0}], it may have been deleted License\ not\ found,\ please\ apply\ addon\ license\ for\ product\ elastic-baremetal. = License not found, please apply addon license for product elastic-baremetal. +Hijacked\ detected.\ Your\ license[%s]\ permits\ %s\ elastic-baremetal\ chassis,\ but\ we\ detect\ there\ are\ %s\ in\ the\ database.\ You\ can\ either\ delete\ additional\ chassis\ or\ apply\ a\ new\ license. = Hijacked detected. Your license[{0}] permits {1} elastic-baremetal chassis, but we detect there are {2} in the database. You can either delete additional chassis or apply a new license. +Insufficient\ elastic-baremetal\ chassis\ number\ licensed.\ You\ can\ either\ delete\ additional\ chassis\ or\ apply\ a\ new\ license. = Insufficient elastic-baremetal chassis number licensed. You can either delete additional chassis or apply a new license. not\ supported = not supported Bare\ Metal\ IPMI\ 2\ Chassis\ %s\:%d\ already\ exists = Bare Metal IPMI 2 Chassis {0}:{1} already exists +Failed\ to\ reach\ the\ baremetal2\ chassis,\ please\ make\ sure\:\ 1.\ the\ IPMI\ connection\ is\ active;\ 2.\ the\ IPMI\ Address,\ Port,\ Username\ and\ Password\ are\ correct;\ 3.\ IPMI\ Over\ LAN\ is\ enabled\ in\ BIOS. = Failed to reach the baremetal2 chassis, please make sure: 1. the IPMI connection is active; 2. the IPMI Address, Port, Username and Password are correct; 3. IPMI Over LAN is enabled in BIOS. 
BareMetal2\ Chassis\ of\ IPMI\ address\ %s\ and\ IPMI\ port\ %d\ has\ already\ been\ created. = BareMetal2 Chassis of IPMI address {0} and IPMI port {1} has already been created. Cluster[uuid\:%s]\ is\ not\ a\ BareMetal2\ Cluster. = Cluster[uuid:{0}] is not a BareMetal2 Cluster. no\ usable\ baremetal2\ gateway\ in\ cluster[uuid\:%s] = no usable baremetal2 gateway in cluster[uuid:{0}] @@ -185,25 +291,43 @@ received\ hardware\ info\ for\ unknown\ baremetal2\ chassis[ipmi_addr\:%s,\ ipmi cluster\ type\ and\ hypervisor\ type\ should\ all\ be\ baremetal2\ or\ all\ not = cluster type and hypervisor type should all be baremetal2 or all not the\ architecture\ must\ be\ set\ when\ create\ new\ baremetal2\ clusters = the architecture must be set when create new baremetal2 clusters do\ not\ add\ host\ into\ baremetal2\ cluster = do not add host into baremetal2 cluster +l2\ network\ should\ not\ have\ the\ same\ interface\ name\ with\ provision\ network\ that's\ already\ attached\ to\ the\ cluster = l2 network should not have the same interface name with provision network that''s already attached to the cluster Can\ not\ attach\ third-party\ ceph\ with\ token\ into\ aarch64\ cluster. = Can not attach third-party ceph with token into aarch64 cluster. Can\ not\ attach\ local\ storage\ into\ baremetal2\ cluster. = Can not attach local storage into baremetal2 cluster. 
+failed\ to\ delete\ convert\ volume\ to\ chassis\ local\ disk\ configurations\ in\ gateway[uuid\:%s]\ for\ baremetal2\ instance[uuid\:%s] = failed to delete convert volume to chassis local disk configurations in gateway[uuid:{0}] for baremetal2 instance[uuid:{1}] baremetal2\ instance[uuid\:%s]\ is\ not\ connected,\ cannot\ detach\ provision\ nic\ from\ bonding = baremetal2 instance[uuid:{0}] is not connected, cannot detach provision nic from bonding +failed\ to\ detach\ provision\ nic\ to\ bonding\ on\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed to detach provision nic to bonding on baremetal2 instance[uuid:{0}] through gateway[uuid:{1}], because {2} baremetal2\ instance[uuid\:%s]\ not\ connected,\ cannot\ attach\ provision\ nic\ to\ bond = baremetal2 instance[uuid:{0}] not connected, cannot attach provision nic to bond +failed\ to\ attach\ provision\ nic\ to\ bonding\ on\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed to attach provision nic to bonding on baremetal2 instance[uuid:{0}] through gateway[uuid:{1}], because {2} chassis\:%s\ disk\ does\ not\ have\ wwn\ info,\ please\ inspect\ chassis\ and\ try\ again = chassis:{0} disk does not have wwn info, please inspect chassis and try again convert\ image\ data\ to\ local\ disk\ failed = convert image data to local disk failed baremetal2\ instance[uuid\:%s]\ convert\ volume\ failed\ on\ baremetal2\ chassis[uuid\:%s]\ ,\ timeout\ after\ %s\ minutes\ = baremetal2 instance[uuid:{0}] convert volume failed on baremetal2 chassis[uuid:{1}] , timeout after {2} minutes failed\ to\ prepare\ provision\ network\ in\ gateway[uuid\:%s],\ because\ %s = failed to prepare provision network in gateway[uuid:{0}], because {1} failed\ to\ destroy\ provision\ network\ in\ gateway[uuid\:%s],\ because\ %s = failed to destroy provision network in gateway[uuid:{0}], because {1} no\ provision\ nic\ found\ for\ baremetal2\ instance[uuid\:%s] = no provision nic found for 
baremetal2 instance[uuid:{0}] +failed\ to\ create\ provision\ configurations\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = failed to create provision configurations for baremetal2 instance[uuid:{0}] in gateway[uuid:{1}], because {2} +failed\ to\ delete\ provision\ configurations\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = failed to delete provision configurations for baremetal2 instance[uuid:{0}] in gateway[uuid:{1}], because {2} +failed\ to\ create\ console\ proxy\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = failed to create console proxy for baremetal2 instance[uuid:{0}] in gateway[uuid:{1}], because {2} +failed\ to\ change\ default\ network\ from\ l3[uuid\:%s]\ to\ l3[uuid\:%s]\ for\ baremetal2\ instance[uuid\:%s],\ because\ %s = failed to change default network from l3[uuid:{0}] to l3[uuid:{1}] for baremetal2 instance[uuid:{2}], because {3} +failed\ to\ ping\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed to ping baremetal2 instance[uuid:{0}] through gateway[uuid:{1}], because {2} +failed\ to\ change\ the\ password\ of\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed to change the password of baremetal2 instance[uuid:{0}] through gateway[uuid:{1}], because {2} failed\ to\ power\ on\ baremetal2\ chassis[uuid\:%s]\ using\ ipmitool = failed to power on baremetal2 chassis[uuid:{0}] using ipmitool failed\ to\ power\ off\ baremetal2\ chassis[uuid\:%s]\ using\ ipmitool = failed to power off baremetal2 chassis[uuid:{0}] using ipmitool +failed\ to\ power\ off\ baremetal2\ instance[uuid\:%s]\ by\ bm\ agent,\ because\ %s = failed to power off baremetal2 instance[uuid:{0}] by bm agent, because {1} baremetal2\ chassis[uuid\:%s]\ is\ still\ not\ POWER_OFF\ %d\ seconds\ later = baremetal2 chassis[uuid:{0}] is still not POWER_OFF {1} seconds later vmInstanceUuids\ is\ empty = vmInstanceUuids is empty the\ baremetal2\ 
gateway[uuid\:%s,\ status\:%s]\ is\ not\ Connected = the baremetal2 gateway[uuid:{0}, status:{1}] is not Connected baremetal2\ instance[uuid\:%s]\ not\ connected,\ cannot\ attach\ nic\ to\ it = baremetal2 instance[uuid:{0}] not connected, cannot attach nic to it +failed\ to\ attach\ nic[uuid\:%s]\ to\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed to attach nic[uuid:{0}] to baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3} baremetal2\ instance[uuid\:%s]\ is\ not\ connected,\ cannot\ detach\ nic\ from\ it = baremetal2 instance[uuid:{0}] is not connected, cannot detach nic from it +failed\ to\ detach\ nic[uuid\:%s]\ from\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed to detach nic[uuid:{0}] from baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3} +failed\ to\ prepare\ volume[uuid\:%s]\ for\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed to prepare volume[uuid:{0}] for baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3} +failed\ to\ attach\ volume[uuid\:%s]\ to\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed to attach volume[uuid:{0}] to baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3} baremetal2\ instance[uuid\:%s]\ is\ not\ connected,\ cannot\ attach\ volume\ to\ it = baremetal2 instance[uuid:{0}] is not connected, cannot attach volume to it +failed\ to\ get\ volume[uuid\:%s]\ lunid\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = failed to get volume[uuid:{0}] lunid for baremetal2 instance[uuid:{1}] in gateway[uuid:{2}], because {3} failed\ to\ get\ gateway\ ips\ of\ the\ access\ path[iscsiPath\:\ %s]\ for\ block\ volume\ %s,\ because\ %s = failed to get gateway ips of the access path[iscsiPath: {0}] for block volume {1}, because {2} +failed\ to\ detach\ volume[uuid\:%s]\ from\ baremetal2\ instance[uuid\:%s]\ through\ 
gateway[uuid\:%s],\ because\ %s = failed to detach volume[uuid:{0}] from baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3} +failed\ to\ destroy\ volume[uuid\:%s]\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = failed to destroy volume[uuid:{0}] for baremetal2 instance[uuid:{1}] in gateway[uuid:{2}], because {3} all\ ceph\ mons\ of\ primary\ storage[uuid\:%s]\ are\ not\ in\ Connected\ state = all ceph mons of primary storage[uuid:{0}] are not in Connected state there\ is\ already\ a\ baremetal\ pxe\ server\ with\ management\ ip\ %s,\ do\ not\ use\ it\ to\ create\ baremetal2\ gateway = there is already a baremetal pxe server with management ip {0}, do not use it to create baremetal2 gateway management\ ip[%s]\ is\ neither\ an\ IPv4\ address\ nor\ a\ valid\ hostname = management ip[{0}] is neither an IPv4 address nor a valid hostname @@ -220,7 +344,9 @@ cluster[uuid\:%s]\ is\ not\ a\ baremetal2\ cluster = cluster[uuid:{0}] is not a gateway[uuid\:%s]\ does\ not\ exist = gateway[uuid:{0}] does not exist baremetal2\ gateway[uuid\:%s]\ is\ already\ in\ cluster[uuid\:%s] = baremetal2 gateway[uuid:{0}] is already in cluster[uuid:{1}] baremetal2\ gateway[uuid\:%s]\ is\ not\ in\ the\ same\ zone\ as\ cluster[uuid\:%s] = baremetal2 gateway[uuid:{0}] is not in the same zone as cluster[uuid:{1}] +cannot\ change\ the\ cluster\ of\ baremetal2\ gateway[uuid\:%s]\ when\ there\ are\ running\ instances\ depending\ on\ it = cannot change the cluster of baremetal2 gateway[uuid:{0}] when there are running instances depending on it baremetal2\ instance[uuid\:%s]\ doesn't\ exist,\ cannot\ generate\ its\ console\ url = baremetal2 instance[uuid:{0}] doesn''t exist, cannot generate its console url +baremetal2\ gateway[uuid\:%s]\ is\ not\ Connected,\ cannot\ generate\ console\ url\ for\ instance[uuid\:%s] = baremetal2 gateway[uuid:{0}] is not Connected, cannot generate console url for instance[uuid:{1}] cluster[uuid\:%s]\ hypervisorType\ is\ not\ 
%s = cluster[uuid:{0}] hypervisorType is not {1} cluster[%s]\ is\ not\ baremetal2\ type = cluster[{0}] is not baremetal2 type baremetal2\ instance\ required = baremetal2 instance required @@ -238,49 +364,84 @@ failed\ to\ allocate\ primary\ storage\ in\ clusters[uuids\:%s]\ for\ baremetal2 failed\ to\ allocate\ gateway\ in\ clusters[uuids\:%s]\ for\ baremetal2\ instance[uuid\:%s] = failed to allocate gateway in clusters[uuids:{0}] for baremetal2 instance[uuid:{1}] failed\ to\ allocate\ chassis\ in\ clusters[uuids\:%s]\ for\ baremetal2\ instance[uuid\:%s] = failed to allocate chassis in clusters[uuids:{0}] for baremetal2 instance[uuid:{1}] no\ baremetal2\ cluster\ found\ in\ clusters[uuid\:%s] = no baremetal2 cluster found in clusters[uuid:{0}] +only\ baremetal2\ clusters[uuid\:%s]\ meet\ the\ needs\ for\ chassis\ and\ gateway,\ but\ they\ have\ no\ provision\ network\ attached = only baremetal2 clusters[uuid:{0}] meet the needs for chassis and gateway, but they have no provision network attached +name[%s]\ is\ invalid,\ the\ name\ requirement\:\ 1~128\ characters,\ support\ uppercase\ and\ lowercase\ letters,\ numbers,\ underscores,\ and\ hyphens;\ It\ can\ only\ start\ with\ uppercase\ and\ lowercase\ letters;\ It\ does\ not\ start\ or\ end\ with\ a\ space\ = name[{0}] is invalid, the name requirement: 1~128 characters, support uppercase and lowercase letters, numbers, underscores, and hyphens; It can only start with uppercase and lowercase letters; It does not start or end with a space only\ support\ vpc\ network\ support\ attach\ eip\ on\ baremetal2\ instance = only support vpc network support attach eip on baremetal2 instance bare\ metal\ instance\ not\ allowed\ to\ change\ vm\ nic\ network = bare metal instance not allowed to change vm nic network current\ operation\ is\ not\ supported\ on\ local\ baremetal\ instance = current operation is not supported on local baremetal instance not\ supported\ by\ baremetal2\ instance = not supported by baremetal2 instance 
baremetal2\ instance[uuid\:%s]\ is\ not\ Connected = baremetal2 instance[uuid:{0}] is not Connected baremetal2\ instance[uuid\:%s]\ is\ not\ stopped = baremetal2 instance[uuid:{0}] is not stopped +baremetal2\ instance[uuid\:%s]\ is\ running\ but\ its\ agent\ is\ not\ Connected = baremetal2 instance[uuid:{0}] is running but its agent is not Connected make\ sure\ all\ baremetal2\ gateways\ on\ provision\ network[uuid\:%s]\ are\ Connected = make sure all baremetal2 gateways on provision network[uuid:{0}] are Connected +baremetal2\ instance[uuid\:%s]\ is\ not\ stopped\ can\ not\ change\ its\ chassis\ offering = baremetal2 instance[uuid:{0}] is not stopped can not change its chassis offering +baremetal2\ instance[uuid\:%s]\ has\ not\ been\ allocated\ a\ chassis,\ start\ the\ instance\ and\ try\ again = baremetal2 instance[uuid:{0}] has not been allocated a chassis, start the instance and try again only\ l3\ network\ with\ ip\ version\ %d\ is\ supported\ by\ baremetal2\ instance = only l3 network with ip version {0} is supported by baremetal2 instance l2\ network\ type\ %s\ not\ supported\ by\ baremetal2\ instance = l2 network type {0} not supported by baremetal2 instance customMac\ is\ mandatory\ when\ attaching\ l3\ network\ to\ baremetal2\ instance = customMac is mandatory when attaching l3 network to baremetal2 instance %s\ is\ not\ valid\ mac\ address = {0} is not valid mac address duplicated\ mac\ address\ %s = duplicated mac address {0} +baremetal2\ instance[uuid\:%s]\ running\ on\ chassis[uuid\:%s],\ which\ doesn't\ have\ non-provisioning\ nic\ with\ mac\ address\ %s = baremetal2 instance[uuid:{0}] running on chassis[uuid:{1}], which doesn''t have non-provisioning nic with mac address {2} mac\ address\ %s\ has\ already\ been\ used,\ try\ another\ one = mac address {0} has already been used, try another one nic\ with\ mac\:%s\ cannot\ be\ attached\ l3Network,\ because\ it\ has\ been\ bonded = nic with mac:{0} cannot be attached l3Network, because it has been 
bonded third\ party\ ceph\ cannot\ mixed\ with\ other\ primary\ storage = third party ceph cannot mixed with other primary storage remote\ provision\ instance\ not\ support\ attach\ provision\ nic\ to\ bond = remote provision instance not support attach provision nic to bond +cluster[uuid\:%s]\ is\ not\ an\ Enabled\ baremetal2\ cluster,\ cannot\ start\ instance[uuid\:%s]\ in\ it = cluster[uuid:{0}] is not an Enabled baremetal2 cluster, cannot start instance[uuid:{1}] in it +baremetal2\ gateway[uuid\:%s]\ does\ not\ exist\ or\ is\ not\ Enabled\ or\ Connected = baremetal2 gateway[uuid:{0}] does not exist or is not Enabled or Connected +baremetal2\ gateway[uuid\:%s]\ is\ not\ in\ cluster\ [uuid\:%s] = baremetal2 gateway[uuid:{0}] is not in cluster [uuid:{1}] please\ specify\ chassis\ uuid\ or\ chassis\ offering\ uuid\ to\ start\ baremetal2\ instance[uuid\:%s] = please specify chassis uuid or chassis offering uuid to start baremetal2 instance[uuid:{0}] baremetal2\ chassis\ offering[uuid\:%s]\ does\ not\ exist = baremetal2 chassis offering[uuid:{0}] does not exist baremetal2\ chassis\ offering[uuid\:%s]\ is\ not\ Enabled = baremetal2 chassis offering[uuid:{0}] is not Enabled no\ need\ to\ set\ chassisOfferingUuid\ because\ the\ instance\ has\ been\ assigned\ an\ chassis\ already = no need to set chassisOfferingUuid because the instance has been assigned an chassis already no\ need\ to\ set\ chassisOfferingUuid\ because\ the\ instance\ has\ been\ assigned\ an\ chassis\ offering\ already = no need to set chassisOfferingUuid because the instance has been assigned an chassis offering already baremetal2\ chassis[uuid\:%s]\ does\ not\ exist = baremetal2 chassis[uuid:{0}] does not exist +baremetal2\ chassis[uuid\:%s]\ is\ not\ belonging\ to\ chassis\ offering[uuid\:%s] = baremetal2 chassis[uuid:{0}] is not belonging to chassis offering[uuid:{1}] baremetal2\ chassis[uuid\:%s]\ is\ not\ Enabled = baremetal2 chassis[uuid:{0}] is not Enabled baremetal2\ chassis[uuid\:%s]\ has\ 
already\ been\ allocated = baremetal2 chassis[uuid:{0}] has already been allocated +zone[uuid\:%s]\ is\ specified\ but\ it's\ not\ Enabled,\ can\ not\ create\ baremetal2\ instance\ from\ it = zone[uuid:{0}] is specified but it''s not Enabled, can not create baremetal2 instance from it +cluster[uuid\:%s]\ is\ specified\ but\ it's\ not\ an\ Enabled\ baremetal2\ cluster,\ can\ not\ create\ baremetal2\ instance\ from\ it = cluster[uuid:{0}] is specified but it''s not an Enabled baremetal2 cluster, can not create baremetal2 instance from it neither\ chassisUuid\ nor\ chassisOfferingUuid\ is\ set\ when\ create\ baremetal2\ instance = neither chassisUuid nor chassisOfferingUuid is set when create baremetal2 instance do\ not\ set\ chassisUuid\ and\ chassisOfferingUuid\ at\ the\ same\ time = do not set chassisUuid and chassisOfferingUuid at the same time +baremetal2\ chassis[uuid\:%s]\ is\ not\ Enabled,\ can't\ create\ baremetal2\ instance\ from\ it = baremetal2 chassis[uuid:{0}] is not Enabled, can''t create baremetal2 instance from it +baremetal2\ chassis[uuid\:%s]\ is\ not\ Available,\ can't\ create\ baremetal2\ instance\ from\ it = baremetal2 chassis[uuid:{0}] is not Available, can''t create baremetal2 instance from it +baremetal2\ chassis\ offering[uuid\:%s]\ is\ not\ Enabled,\ can't\ create\ baremetal2\ instance\ from\ it = baremetal2 chassis offering[uuid:{0}] is not Enabled, can''t create baremetal2 instance from it +baremetal2\ gateway[uuid\:%s]\ is\ not\ Enabled,\ can't\ create\ baremetal2\ instance\ from\ it = baremetal2 gateway[uuid:{0}] is not Enabled, can''t create baremetal2 instance from it +baremetal2\ gateway[uuid\:%s]\ is\ not\ Connected,\ can't\ create\ baremetal2\ instance\ from\ it = baremetal2 gateway[uuid:{0}] is not Connected, can''t create baremetal2 instance from it +baremetal2\ gateway[uuid\:%s]\ is\ not\ in\ the\ same\ cluster\ with\ chassis[uuid\:%s] = baremetal2 gateway[uuid:{0}] is not in the same cluster with chassis[uuid:{1}] image\ cannot\ 
be\ empty\ unless\ chassis\ is\ in\ direct\ mode = image cannot be empty unless chassis is in direct mode direct\ mode\ not\ support\ choose\ image = direct mode not support choose image image[uuid\:%s]\ does\ not\ exist = image[uuid:{0}] does not exist Chassis\ disk[%s]\ not\ have\ enough\ capacity\ for\ image[%s] = Chassis disk[{0}] not have enough capacity for image[{1}] +image[uuid\:%s]\ is\ not\ Enabled,\ can't\ create\ baremetal2\ instance\ from\ it = image[uuid:{0}] is not Enabled, can''t create baremetal2 instance from it +image[uuid\:%s]\ is\ not\ Ready,\ can't\ create\ baremetal2\ instance\ from\ it = image[uuid:{0}] is not Ready, can''t create baremetal2 instance from it +image[uuid\:%s]\ is\ of\ mediaType\:\ %s,\ only\ RootVolumeTemplate\ can\ be\ used\ to\ create\ baremetal2\ instance = image[uuid:{0}] is of mediaType: {1}, only RootVolumeTemplate can be used to create baremetal2 instance +image[uuid\:%s]\ is\ of\ format\:\ %s,\ only\ %s\ can\ be\ used\ to\ create\ baremetal2\ instance = image[uuid:{0}] is of format: {1}, only {2} can be used to create baremetal2 instance +image[uuid\:%s]\ is\ not\ baremetal2\ image,\ can't\ create\ baremetal2\ instance\ from\ it = image[uuid:{0}] is not baremetal2 image, can''t create baremetal2 instance from it +only\ image\ with\ boot\ mode\ %s\ is\ supported\ to\ create\ baremetal2\ instance = only image with boot mode {0} is supported to create baremetal2 instance different\ boot\ mode\ between\ the\ image\ and\ chassis/offering = different boot mode between the image and chassis/offering +the\ architecture\ of\ baremetal2\ cluster[arch\:%s]\ and\ image[arch\:%s]\ don't\ match = the architecture of baremetal2 cluster[arch:{0}] and image[arch:{1}] don''t match +not\ all\ disk\ offerings[uuids\:%s]\ are\ Enabled,\ can\ not\ create\ baremetal2\ instance\ from\ them = not all disk offerings[uuids:{0}] are Enabled, can not create baremetal2 instance from them +the\ primary\ storage[%s]\ of\ the\ root\ volume\ and\ the\ 
primary\ storage[%s]\ of\ the\ data\ volume\ are\ not\ in\ the\ same\ cluster = the primary storage[{0}] of the root volume and the primary storage[{1}] of the data volume are not in the same cluster cannot\ decide\ which\ zone\ the\ baremetal2\ instance\ should\ be\ created\ in = cannot decide which zone the baremetal2 instance should be created in baremetal2\ instance[uuid\:%s]\ is\ either\ not\ exist\ or\ not\ Connected,\ cannot\ change\ its\ password = baremetal2 instance[uuid:{0}] is either not exist or not Connected, cannot change its password +cannot\ find\ the\ image[uuid\:%s]\ in\ any\ connected\ backup\ storage\ attached\ to\ the\ zone[uuid\:%s].\ check\ below\:\\n1.\ if\ the\ backup\ storage\ is\ attached\ to\ the\ zone\ where\ the\ VM[name\:\ %s,\ uuid\:%s]\ is\ in\\n2.\ if\ the\ backup\ storage\ is\ in\ connected\ status,\ if\ not,\ try\ reconnecting\ it = cannot find the image[uuid:{0}] in any connected backup storage attached to the zone[uuid:{1}]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {2}, uuid:{3}] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it +cannot\ find\ the\ image[uuid\:%s]\ in\ any\ connected\ backup\ storage.\ check\ below\:\\n1.\ if\ the\ backup\ storage\ is\ attached\ to\ the\ zone\ where\ the\ VM[name\:\ %s,\ uuid\:%s]\ is\ in\\n2.\ if\ the\ backup\ storage\ is\ in\ connected\ status,\ if\ not,\ try\ reconnecting\ it = cannot find the image[uuid:{0}] in any connected backup storage. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {1}, uuid:{2}] is in\\n2. 
if the backup storage is in connected status, if not, try reconnecting it no\ backup\ storage\ attached\ to\ the\ zone[uuid\:%s]\ contains\ the\ ISO[uuid\:%s] = no backup storage attached to the zone[uuid:{0}] contains the ISO[uuid:{1}] Cannot\ find\ BareMetal2\ Instance[uuid\:%s],\ it\ may\ have\ been\ deleted = Cannot find BareMetal2 Instance[uuid:{0}], it may have been deleted %s\ can\ only\ be\ created\ or\ deleted = {0} can only be created or deleted %s\ can\ only\ be\ created\ or\ deleted\ when\ the\ baremetal2\ instance\ is\ Running = {0} can only be created or deleted when the baremetal2 instance is Running +there\ already\ exists\ a\ baremetal2\ provision\ network\ with\ dhcpInterface\ \=\ %s,\ dhcpRangeStartIp\ \=\ %s,\ dhcpRangeEndIp\ \=\ %s,\ dhcpRangeNetmask\ \=\ %s,\ dhcpRangeGateway\ \=\ %s = there already exists a baremetal2 provision network with dhcpInterface = {0}, dhcpRangeStartIp = {1}, dhcpRangeEndIp = {2}, dhcpRangeNetmask = {3}, dhcpRangeGateway = {4} +cannot\ update\ baremetal2\ provision\ network[uuid\:%s]\ dhcp\ configuration\ when\ there\ are\ instances\ depending\ on\ it = cannot update baremetal2 provision network[uuid:{0}] dhcp configuration when there are instances depending on it baremetal2\ provision\ network\ dhcp\ range\ netmask\ %s\ is\ invalid = baremetal2 provision network dhcp range netmask {0} is invalid baremetal2\ provision\ network\ start\ ip\ %s\ and\ stop\ ip\ %s\ do\ not\ belong\ to\ the\ same\ subnet = baremetal2 provision network start ip {0} and stop ip {1} do not belong to the same subnet +cannot\ delete\ baremetal2\ provision\ network[uuid\:%s]\ when\ there\ are\ instances\ depending\ on\ it = cannot delete baremetal2 provision network[uuid:{0}] when there are instances depending on it cannot\ attach\ baremetal2\ provision\ network[uuid\:%s]\ to\ non-baremetal2\ cluster[uuid\:%s] = cannot attach baremetal2 provision network[uuid:{0}] to non-baremetal2 cluster[uuid:{1}] baremetal2\ provision\ network[uuid\:%s]\ is\ 
already\ attached\ to\ cluster[uuid\:%s] = baremetal2 provision network[uuid:{0}] is already attached to cluster[uuid:{1}] cannot\ attach\ baremetal2\ provision\ network[uuid\:%s]\ to\ cluster[uuid\:%s]\ because\ the\ cluster\ already\ have\ one = cannot attach baremetal2 provision network[uuid:{0}] to cluster[uuid:{1}] because the cluster already have one cannot\ attach\ baremetal2\ provision\ network[uuid\:%s]\ to\ cluster[uuid\:%s]\ because\ they\ are\ not\ in\ the\ same\ zone = cannot attach baremetal2 provision network[uuid:{0}] to cluster[uuid:{1}] because they are not in the same zone +cannot\ attach\ baremetal2\ provision\ network[uuid\:%s]\ to\ cluster[uuid\:%s],\ because\ we\ need\ to\ make\ sure\ that\ every\ gateway\ attached\ to\ the\ clusters\ that\ have\ the\ same\ provision\ network\ attached = cannot attach baremetal2 provision network[uuid:{0}] to cluster[uuid:{1}], because we need to make sure that every gateway attached to the clusters that have the same provision network attached +provision\ network\ should\ not\ have\ the\ same\ interface\ name\ with\ l2\ networks\ that\ are\ already\ attached\ to\ the\ cluster = provision network should not have the same interface name with l2 networks that are already attached to the cluster +cannot\ detach\ baremetal2\ provision\ network[uuid\:%s]\ when\ there\ are\ running\ instances\ depending\ on\ it = cannot detach baremetal2 provision network[uuid:{0}] when there are running instances depending on it networkUuids\ is\ empty = networkUuids is empty not\ all\ baremetal2\ provision\ networks\ exist\ in\ %s = not all baremetal2 provision networks exist in {0} failed\ to\ update\ provision\ network[uuid\:%s]\ in\ gateway[uuid\:%s]\:\ %s = failed to update provision network[uuid:{0}] in gateway[uuid:{1}]: {2} @@ -313,8 +474,85 @@ please\ set\ the\ correct\ priceUserConfig,\ for\ example\:\ priceUserConfig\:{\ please\ set\ the\ correct\ priceUserConfig,\ for\ example\:\ 
priceUserConfig\:{\\nvolume\:{\\npriceKeyName\:\\"priceKeyName\\"}} = please set the correct priceUserConfig, for example: priceUserConfig:'{\nvolume:{\npriceKeyName:\"priceKeyName\"}'} unsupported\ billing\ resource\ type\ [%s] = unsupported billing resource type [{0}] +# In Module: block-primary-storage +primaryStorageUuid\ is\ mandatory\ when\ download\ image\ cache = primaryStorageUuid is mandatory when download image cache +the\ block\ primary\ storage[uuid\:%s,\ name\:%s]\ can\ not\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = the block primary storage[uuid:{0}, name:{1}] can not find any available host in attached clusters for instantiating the volume +fail\ to\ find\ a\ host\ to\ map\ for\ volume\ %s = fail to find a host to map for volume {0} +fail\ to\ find\ install\ path\ for\ downloading\ volume\:\ %s,\ please\ prepare\ it\ before\ downloading = fail to find install path for downloading volume: {0}, please prepare it before downloading +fail\ to\ find\ a\ host\ to\ download\ volume\ %s = fail to find a host to download volume {0} +Fail\ to\ get\ host\ initiator\ ref,\ please\ reconnect\ this\ host\:%s = Fail to get host initiator ref, please reconnect this host:{0} +Block\ primary[uuid\:\ %s]\ has\ not\ attached\ to\ any\ clusters = Block primary[uuid: {0}] has not attached to any clusters +Fail\ to\ connect\ block\ primary[uuid\:\ %s],\ because\ no\ connected\ host = Fail to connect block primary[uuid: {0}], because no connected host +Failed\ to\ attach\ block\ primary[uuid\:\ %s]\ to\ cluster[uuid\:\ %],\ because\ no\ connected\ host\ in\ cluster = Failed to attach block primary[uuid: {0}] to cluster[uuid: %], because no connected host in cluster +backing\ up\ snapshots\ to\ backup\ storage\ is\ a\ depreciated\ feature,\ which\ doesn't\ support\ on\ block\ primary\ storage = backing up snapshots to backup storage is a depreciated feature, which doesn''t support on block primary storage +fail\ to\ find\ cluster\ 
for\ commit\ volume\ on\ ps\:%s = fail to find cluster for commit volume on ps:{0} +fail\ to\ find\ host\ for\ commit\ volume\:%s = fail to find host for commit volume:{0} +KVM\ host[uuid\:\ %s]\ fails\ to\ be\ added\ into\ block\ primary\ storage[uuid\:\ %s],\ %s = KVM host[uuid: {0}] fails to be added into block primary storage[uuid: {1}], {2} +iso[uuid\:\ %s]\ is\ attached\ to\ vm[uuid\:\ ],\ but\ iso\ is\ not\ on\ any\ block\ storage,\ you\ have\ to\ detach\ it,\ before\ migrate\ vm = iso[uuid: {0}] is attached to vm[uuid: ], but iso is not on any block storage, you have to detach it, before migrate vm +fail\ to\ find\ block\ scsi\ lun\ for\ volume\:\ %s = fail to find block scsi lun for volume: {0} +fail\ to\ exchange\ block\ scsi\ lun\ info\:%s = fail to exchange block scsi lun info:{0} +fail\ to\ clean\ up\ after\ detach\ volume = fail to clean up after detach volume +not\ support\ take\ volumes\ snapshots\ on\ multiple\ ps\ when\ including\ ceph = not support take volumes snapshots on multiple ps when including ceph +fail\ to\ map\ lun\ to\ host\ before\ attach\ volume\ to\ vm = fail to map lun to host before attach volume to vm +primary\ storage\ uuid\ is\ mandatory\ when\ delete\ lun = primary storage uuid is mandatory when delete lun +initiatorName\ %s\ is\ occupied\ by\ other\ host,\ please\ regenerate\ initiator\ of\ host\ %s = initiatorName {0} is occupied by other host, please regenerate initiator of host {1} +failed\ to\ download[%s]\ from\ BackupStorage[hostname\:%s]\ to\ block\ primary\ storage[uuid\:%s,\ path\:%s],\ %s = failed to download[{0}] from BackupStorage[hostname:{1}] to block primary storage[uuid:{2}, path:{3}], {4} +can\ not\ execute\ map\ lun\ to\ host\ flow,\ because\ backend\ device\ is\ null = can not execute map lun to host flow, because backend device is null +can\ not\ execute\ map\ lun\ to\ host\ flow,\ because\ ps\ host\ ref\ is\ null = can not execute map lun to host flow, because ps host ref is null +can\ not\ execute\ map\ 
lun\ to\ host\ flow,\ because\ ps\ host\ ref\ metadata\ is\ empty = can not execute map lun to host flow, because ps host ref metadata is empty +can\ not\ execute\ map\ lun\ to\ host\ flow,\ invalid\ lun\ id = can not execute map lun to host flow, invalid lun id +can\ not\ execute\ map\ lun\ to\ host\ flow,\ invalid\ lun\ lun\ type = can not execute map lun to host flow, invalid lun lun type +fail\ to\ sync\ access\ zones\ because\ %s = fail to sync access zones because {0} +fail\ to\ get\ access\ zone's\ subnet\ because\ %s = fail to get access zone''s subnet because {0} +fail\ to\ query\ all\ hosts,\ because\ of\ %s = fail to query all hosts, because of {0} +fail\ to\ query\ hosts\ %s,\ because\ of\ %s = fail to query hosts {0}, because of {1} +fail\ to\ add\ host\ %s\ into\ hostGroup\ %s,\ because\ of\ %s = fail to add host {0} into hostGroup {1}, because of {2} +fail\ to\ delete\ host\ %s,\ because\ of\ %s = fail to delete host {0}, because of {1} +fail\ to\ delete\ host\ group\ %s,\ because\ of\ %s = fail to delete host group {0}, because of {1} +host\ id\ is\ mandatory\ but\ get\:%s = host id is mandatory but get:{0} +fail\ to\ delete\ initiator\ %s,\ because\ of\ %s = fail to delete initiator {0}, because of {1} +fail\ to\ query\ host\ group,\ because\ of\ %s = fail to query host group, because of {0} +fail\ to\ add\ host\ group\:\ %s,\ error\ message\:%s\ = fail to add host group: {0}, error message:{1} +fail\ to\ query\ lun\ \:\ %s,\ error\ message\:%s\ = fail to query lun : {0}, error message:{1} +fail\ to\ query\ lun\ by\ path\:\ %s,\ error\ message\:%s\ = fail to query lun by path: {0}, error message:{1} +fail\ to\ update\ lun\ name\:\ %s,\ error\ message\:%s\ = fail to update lun name: {0}, error message:{1} +fail\ to\ create\ lun\ name\:\ %s,\ can\ not\ find\ root\ cause = fail to create lun name: {0}, can not find root cause +unable\ to\ do\ the\ operation\ because\ the\ lun[%s]\ has\ been\ occupied = unable to do the operation because the lun[{0}] 
has been occupied +fail\ to\ create\ lun\ name\:\ %s,\ error\ message\:%s\ = fail to create lun name: {0}, error message:{1} +fail\ to\ query\ lun\ %s,\ because\ of\ %s = fail to query lun {0}, because of {1} +lun\ id\ is\ mandatory\ when\ query\ lun\ map = lun id is mandatory when query lun map +fail\ to\ query\ lun\ map\ for\ host\ group\ %s,\ because\ of\ %s = fail to query lun map for host group {0}, because of {1} +fail\ to\ query\ lun\ map\ %s,\ because\ of\ %s = fail to query lun map {0}, because of {1} +fail\ to\ get\ cluster\ info,\ because\ of\ %s = fail to get cluster info, because of {0} +fail\ to\ map\ lun\ %s\ to\ host\ group\ %s,\ because\ of\ %s = fail to map lun {0} to host group {1}, because of {2} +lun\ map\ id\ is\ mandatory\ but\ get\:%s = lun map id is mandatory but get:{0} +fail\ to\ delete\ lun\ map\ %s,\ because\ of\ %s = fail to delete lun map {0}, because of {1} +lun\ id\ is\ mandatory\ but\ get\:%s = lun id is mandatory but get:{0} +fail\ to\ delete\ lun\ %s,\ because\ of\ %s = fail to delete lun {0}, because of {1} +fail\ to\ get\ storage\ pool\ %s,\ because\ of\ %s = fail to get storage pool {0}, because of {1} +fail\ to\ create\ snapshot\ for\ lun\ %s,\ because\ of\ %s = fail to create snapshot for lun {0}, because of {1} +fail\ to\ query\ snapshots\ %s,\ because\ of\ %s = fail to query snapshots {0}, because of {1} +snapshot\ id\ is\ mandatory\ but\ get\:%s = snapshot id is mandatory but get:{0} +fail\ to\ delete\ snapshot\ %s,\ because\ of\ %s = fail to delete snapshot {0}, because of {1} +fail\ to\ revert\ snapshot\:%s,\ because\ of\:\ %s = fail to revert snapshot:{0}, because of: {1} +fail\ to\ check\ lun\ %s\ session\ state\ ,\ because\ of\:\ %s = fail to check lun {0} session state , because of: {1} +fail\ to\ get\ lun\ %s\ maps,\ because\ of\:\ %s = fail to get lun {0} maps, because of: {1} +fail\ to\ get\ lun\ %s\ remain\ created\ lun\ number,\ because\ of\:\ %s = fail to get lun {0} remain created lun number, because of: {1} 
+There\ is\ no\ way\ to\ get\ lun\ map\ id,\ we\ just\ return\ as\ failure = There is no way to get lun map id, we just return as failure +lun\ map\ id\ is\ mandatory\ can\ not\ be\ null,\ neither\ 0 = lun map id is mandatory can not be null, neither 0 +lun\ id\ is\ illegal = lun id is illegal +fail\ to\ add\ host\ group\:\ %s = fail to add host group: {0} +fail\ to\ add\ host\:\ %s = fail to add host: {0} +lun\ can\ not\ be\ found = lun can not be found +XStor\ cluster\ is\ unhealthy,\ cluster\ info[cluster_data_state\:\ %s,\ cluster_healthy_state\:\ %s,\ cluster_running_state\:\ %s] = XStor cluster is unhealthy, cluster info[cluster_data_state: {0}, cluster_healthy_state: {1}, cluster_running_state: {2}] +illegal\ lun\ id = illegal lun id +fail\ to\ get\ image\ cache\ lun\ info = fail to get image cache lun info + # In Module: cbd +invalid\ mdsUrl[%s],\ the\ sshUsername\:sshPassword\ part\ is\ invalid.\ A\ valid\ mdsUrl\ is\ in\ format\ of\ %s = invalid mdsUrl[{0}], the sshUsername:sshPassword part is invalid. A valid mdsUrl is in format of {1} invalid\ mdsUrl[%s].\ SSH\ username\ and\ password\ must\ be\ separated\ by\ '\:'\ and\ cannot\ be\ empty.\ A\ valid\ monUrl\ format\ is\ %s = invalid mdsUrl[{0}]. SSH username and password must be separated by '':'' and cannot be empty. A valid monUrl format is {1} +invalid\ mdsUrl[%s],\ hostname\ cannot\ be\ null.\ A\ valid\ mdsUrl\ is\ in\ format\ of\ %s = invalid mdsUrl[{0}], hostname cannot be null. A valid mdsUrl is in format of {1} +invalid\ mdsUrl[%s],\ the\ ssh\ port\ is\ greater\ than\ 65535\ or\ smaller\ than\ 1.\ A\ valid\ mdsUrl\ is\ in\ format\ of\ %s = invalid mdsUrl[{0}], the ssh port is greater than 65535 or smaller than 1. 
A valid mdsUrl is in format of {1} # In Module: cbt Cbt\ task\ not\ found[uuid\:\ %s] = Cbt task not found[uuid: {0}] @@ -375,6 +613,8 @@ No\ CDP\ backup\ storage\ found\ for\ VM\:\ %s = No CDP backup storage found for No\ CdpBackupFactory\ of\ type[%s]\ found = No CdpBackupFactory of type[{0}] found CDP\ task[uuid\:\ %s]\ not\ found = CDP task[uuid: {0}] not found unexpected\ task\ type\:\ %s = unexpected task type: {0} +The\ operation\ has\ volume[uuid\:\ %s]\ that\ will\ take\ chain\ type\ snapshot.\ Therefore,\ you\ could\ not\ do\ this\ operation\ when\ a\ CDP\ task\ is\ running\ on\ the\ VM\ instance. = The operation has volume[uuid: {0}] that will take chain type snapshot. Therefore, you could not do this operation when a CDP task is running on the VM instance. +Could\ not\ attach\ volume.The\ VM\ instance\ is\ running\ a\ CDP\ task.\ After\ the\ volume\ is\ attached,\ the\ capacity\ required\ for\ full\ backup\ will\ exceed\ the\ CDP\ task\ planned\ size.\ Please\ plan\ the\ size\ properly\ and\ try\ again. = Could not attach volume.The VM instance is running a CDP task. After the volume is attached, the capacity required for full backup will exceed the CDP task planned size. Please plan the size properly and try again. The\ VM[%s]\ for\ volume[%s]\ is\ running\ CDP,\ cannot\ resize\ now. = The VM[{0}] for volume[{1}] is running CDP, cannot resize now. No\ VM\ found\ for\ CDP\ task[uuid\:\ %s] = No VM found for CDP task[uuid: {0}] BackupStorage[uuid\:\ %s]\ already\ been\ deleted = BackupStorage[uuid: {0}] already been deleted @@ -393,6 +633,7 @@ unexpected\ volume[uuid\:\ %s]\ size\:\ %d = unexpected volume[uuid: {0}] size: resize\ volume[uuid\:\ %s]\ failed\:\ %s = resize volume[uuid: {0}] failed: {1} volume[uuid\:\ %s]\ has\ unexpected\ path\:\ %s = volume[uuid: {0}] has unexpected path: {1} Available\ License\ not\ found,\ please\ apply\ addon\ license\ for\ product\ CDP. = Available License not found, please apply addon license for product CDP. 
+Insufficient\ CDP\ VM\ number\ licensed.\ Your\ license\ permits\ %d\ CDP\ VM,\ there\ are\ %d\ CDP\ VM\ used.\ You\ can\ stop\ or\ disable\ some\ CDP\ tasks\ or\ apply\ a\ new\ license. = Insufficient CDP VM number licensed. Your license permits {0} CDP VM, there are {1} CDP VM used. You can stop or disable some CDP tasks or apply a new license. kvmagent\ restarted = kvmagent restarted kvmagent\ no\ response\ %d\ times = kvmagent no response {0} times recoverVm\:\ host\ uuid\ is\ not\ provided\ and\ original\ host\ is\ not\ found\ for\ VM[uuid\:\ %s] = recoverVm: host uuid is not provided and original host is not found for VM[uuid: {0}] @@ -419,20 +660,28 @@ The\ problem\ may\ be\ caused\ by\ an\ incorrect\ user\ name\ or\ password\ or\ all\ ceph\ mons\ are\ Disconnected\ in\ ceph\ backup\ storage[uuid\:%s] = all ceph mons are Disconnected in ceph backup storage[uuid:{0}] CephMon[hostname\:%s]\ not\ found\ on\ backup\ storage[uuid\:%s] = CephMon[hostname:{0}] not found on backup storage[uuid:{1}] unable\ to\ connect\ to\ the\ ceph\ backup\ storage[uuid\:%s],\ failed\ to\ connect\ all\ ceph\ monitors. = unable to connect to the ceph backup storage[uuid:{0}], failed to connect all ceph monitors. 
+there\ is\ another\ CEPH\ backup\ storage[name\:%s,\ uuid\:%s]\ with\ the\ same\ FSID[%s],\ you\ cannot\ add\ the\ same\ CEPH\ setup\ as\ two\ different\ backup\ storage = there is another CEPH backup storage[name:{0}, uuid:{1}] with the same FSID[{2}], you cannot add the same CEPH setup as two different backup storage image[uuid\:\ %s]\ is\ not\ on\ backup\ storage[uuid\:%s,\ name\:%s] = image[uuid: {0}] is not on backup storage[uuid:{1}, name:{2}] unable\ to\ add\ mon\ to\ ceph\ backup\ storage = unable to add mon to ceph backup storage ceph\ backup\ storage\ do\ not\ support\ calculate\ image\ hash = ceph backup storage do not support calculate image hash +cannot\ update\ status\ of\ the\ ceph\ backup\ storage\ mon[uuid\:%s],\ it\ has\ been\ deleted.This\ error\ can\ be\ ignored = cannot update status of the ceph backup storage mon[uuid:{0}], it has been deleted. This error can be ignored Ceph\ bs[uuid\=%s]\ pool\ name\ not\ found = Ceph bs[uuid={0}] pool name not found delete\ volume\ chain\ error,\ continue\ to\ delete = delete volume chain error, continue to delete +the\ backup\ storage[uuid\:%s,\ name\:%s,\ fsid\:%s]\ is\ not\ in\ the\ same\ ceph\ cluster\ with\ the\ primary\ storage[uuid\:%s,\ name\:%s,\ fsid\:%s] = the backup storage[uuid:{0}, name:{1}, fsid:{2}] is not in the same ceph cluster with the primary storage[uuid:{3}, name:{4}, fsid:{5}] fsid\ is\ not\ same\ between\ ps[%s]\ and\ bs[%s],\ create\ template\ is\ forbidden. = fsid is not same between ps[{0}] and bs[{1}], create template is forbidden. all\ monitors\ cannot\ execute\ http\ call[%s] = all monitors cannot execute http call[{0}] +unable\ to\ connect\ to\ the\ ceph\ primary\ storage[uuid\:%s],\ failed\ to\ connect\ all\ ceph\ monitors. = unable to connect to the ceph primary storage[uuid:{0}], failed to connect all ceph monitors. ceph\ primary\ storage[uuid\:%s]\ may\ have\ been\ deleted. = ceph primary storage[uuid:{0}] may have been deleted.
the\ fsid\ returned\ by\ mons\ are\ mismatching,\ it\ seems\ the\ mons\ belong\ to\ different\ ceph\ clusters\:\\n = the fsid returned by mons are mismatching, it seems the mons belong to different ceph clusters:\\n +there\ is\ another\ CEPH\ primary\ storage[name\:%s,\ uuid\:%s]\ with\ the\ same\ FSID[%s],\ you\ cannot\ add\ the\ same\ CEPH\ setup\ as\ two\ different\ primary\ storage = there is another CEPH primary storage[name:{0}, uuid:{1}] with the same FSID[{2}], you cannot add the same CEPH setup as two different primary storage +the\ ceph\ primary\ storage[uuid\:%s,\ name\:%s]\ is\ down,\ as\ one\ mon[uuid\:%s]\ reports\ an\ operation\ failure[%s] = the ceph primary storage[uuid:{0}, name:{1}] is down, as one mon[uuid:{2}] reports an operation failure[{3}] unable\ to\ connect\ mons = unable to connect mons unable\ to\ add\ mon\ to\ ceph\ primary\ storage = unable to add mon to ceph primary storage +the\ mon[ip\:%s]\ returns\ a\ fsid[%s]\ different\ from\ the\ current\ fsid[%s]\ of\ the\ cep\ cluster,are\ you\ adding\ a\ mon\ not\ belonging\ to\ current\ cluster\ mistakenly? = the mon[ip:{0}] returns a fsid[{1}] different from the current fsid[{2}] of the ceph cluster, are you adding a mon not belonging to current cluster mistakenly?
operation\ error,\ because\:\ failed\ to\ get\ response = operation error, because: failed to get response backing\ up\ snapshots\ to\ backup\ storage\ is\ a\ depreciated\ feature,\ which\ will\ be\ removed\ in\ future\ version = backing up snapshots to backup storage is a depreciated feature, which will be removed in future version cannot\ reinit\ rootvolume\ [%s]\ because\ image\ [%s]\ has\ been\ deleted\ and\ imagecache\ cannot\ be\ found = cannot reinit rootvolume [{0}] because image [{1}] has been deleted and imagecache cannot be found +cannot\ find\ backupstorage\ to\ download\ image\ [%s]\ to\ primarystorage\ [%s]\ due\ to\ lack\ of\ Ready\ and\ accessible\ image = cannot find backupstorage to download image [{0}] to primarystorage [{1}] due to lack of Ready and accessible image allocated\ url\ not\ found = allocated url not found invalid\ allocated\ url\:%s = invalid allocated url:{0} cannot\ find\ any\ Connected\ ceph\ mon\ for\ the\ primary\ storage[uuid\:%s] = cannot find any Connected ceph mon for the primary storage[uuid:{0}] @@ -443,7 +692,10 @@ rootVolume[%s]\ is\ already\ in\ use(ceph\ rbd\ image[%s]\ already\ has\ watcher cannot\ find\ cephPrimaryStorage\ pool[poolName\=%s] = cannot find cephPrimaryStorage pool[poolName={0}] cephPrimaryStorage\ pool[poolName\=%s]\ available\ virtual\ capacity\ not\ enough\ for\ size\ %s = cephPrimaryStorage pool[poolName={0}] available virtual capacity not enough for size {1} cannot\ allocate\ pool\ for\ primaryStorage[%s],\ purpose\:\ %s = cannot allocate pool for primaryStorage[{0}], purpose: {1} +cannot\ update\ status\ of\ the\ ceph\ primary\ storage\ mon[uuid\:%s],\ it\ has\ been\ deleted.This\ error\ can\ be\ ignored = cannot update status of the ceph primary storage mon[uuid:{0}], it has been deleted.This error can be ignored Ceph\ ps[uuid\=%s]\ root\ pool\ name\ not\ found = Ceph ps[uuid={0}] root pool name not found +invalid\ uri,\ correct\ example\ is\ ceph\://$POOLNAME/$VOLUMEUUID\ or\ 
volume\://$VOLUMEUUID\ or\ volumeSnapshotReuse\://$SNAPSHOTUUID = invalid uri, correct example is ceph://$POOLNAME/$VOLUMEUUID or volume://$VOLUMEUUID or volumeSnapshotReuse://$SNAPSHOTUUID +required\ ceph\ pool[uuid\:%s]\ cannot\ satisfy\ conditions\ [availableSize\ >\ %s\ bytes],\ current\ available\ size\ %s = required ceph pool[uuid:{0}] cannot satisfy conditions [availableSize > {1} bytes], current available size {2} cannot\ find\ ceph\ pool\ [%s]\ related\ osdgroup = cannot find ceph pool [{0}] related osdgroup # In Module: cloudformation @@ -480,6 +732,7 @@ Some\ actions\ are\ invalid = Some actions are invalid no\ root\ element\ found,\ please\ check\ your\ cfn\ formation! = no root element found, please check your cfn formation! Wrong\ json\ format,\ causes\:\ %s = Wrong json format, causes: {0} CfnRootDecoder's\ weight\ must\ between\ 0-100,\ 0\ means\ decode\ first,\ default\ is\ 50 = CfnRootDecoder''s weight must between 0-100, 0 means decode first, default is 50 +Condition\ key\:\ %s\ only\ support\ 1\ element\ in\ the\ json\ object\ of\ value,\ but\ got\ %d\ elements! = Condition key: {0} only support 1 element in the json object of value, but got {1} elements! Value\ must\ be\ boolean\ in\ 'Condition'\ field = Value must be boolean in ''Condition'' field Only\ support\ ZStack\ Template\ Functions\ in\ 'Condition'\ field! = Only support ZStack Template Functions in ''Condition'' field! Condition\ body\ cannot\ support\ json\ null\ or\ array! = Condition body cannot support json null or array! @@ -509,6 +762,7 @@ element\ is\ null! = element is null! 
# In Module: compute cannot\ find\ root\ volume\ of\ vm[uuid\:%s] = cannot find root volume of vm[uuid:{0}] +the\ backup\ storage[uuid\:%s,\ type\:%s]\ requires\ bound\ primary\ storage,\ however,\ the\ primary\ storage\ has\ not\ been\ added = the backup storage[uuid:{0}, type:{1}] requires bound primary storage, however, the primary storage has not been added No\ host\ with\ %s\ found = No host with {0} found either\ volumeUuid\ or\ volumeSnapshotUuid\ must\ be\ set = either volumeUuid or volumeSnapshotUuid must be set zoneUuids,\ clusterUuids,\ hostUuids\ must\ at\ least\ have\ one\ be\ none-empty\ list,\ or\ all\ is\ set\ to\ true = zoneUuids, clusterUuids, hostUuids must at least have one be none-empty list, or all is set to true @@ -528,16 +782,19 @@ webssh\ server\ is\ not\ running. = webssh server is not running. there\ has\ been\ a\ host\ having\ managementIp[%s] = there has been a host having managementIp[{0}] managementIp[%s]\ is\ neither\ an\ IPv4\ address\ nor\ a\ valid\ hostname = managementIp[{0}] is neither an IPv4 address nor a valid hostname can\ not\ maintain\ host[uuid\:%s,\ status\:%s]which\ is\ not\ Connected = can not maintain host[uuid:{0}, status:{1}]which is not Connected +the\ password\ for\ the\ physical\ machine\ [%s]\ is\ empty.\ please\ set\ a\ password = the password for the physical machine [{0}] is empty. 
please set a password path\ cannot\ be\ empty = path cannot be empty path\ must\ be\ an\ absolute\ path\ (start\ with\ '/')\\" = path must be an absolute path (start with ''/'')\\" invalid\ path\ traversal\ detected = invalid path traversal detected mountPoint\ cannot\ be\ empty = mountPoint cannot be empty mount\ point\ must\ be\ an\ absolute\ path\ (start\ with\ '/') = mount point must be an absolute path (start with ''/'') path\ traversal\ detected\ in\ mount\ point = path traversal detected in mount point +the\ mount\ point\ must\ strictly\ follow\ the\ security\ pattern\:\ '^[a-zA-Z0-9_\\-./]+$'.\ this\ requires\:\ \\n1.\ only\ alphanumeric\ characters\ [a-z,\ A-Z,\ 0-9]\\n2.\ limited\ special\ characters\:\ hyphen\ (-),\ underscore\ (_),\ period\ (.),\ and\ forward\ slash\ (/)\\n3.\ must\ be\ a\ valid\ absolute\ path\ starting\ with\ '/'\\n\\nvalid\ examples\:\\n\ \ /mnt/data\\n\ \ /volumes/drive01\\n\ \ /backup-2023.disk\\n\\ninvalid\ value\ detected\:\ '%s' = the mount point must strictly follow the security pattern: ''^[a-zA-Z0-9_\\-./]+$''. this requires: \\n1. only alphanumeric characters [a-z, A-Z, 0-9]\\n2. limited special characters: hyphen (-), underscore (_), period (.), and forward slash (/)\\n3. 
must be a valid absolute path starting with ''/''\\n\\nvalid examples:\\n /mnt/data\\n /volumes/drive01\\n /backup-2023.disk\\n\\ninvalid value detected: ''{0}'' mountPoint\ should\ not\ end\ with\ '/'\ except\ root\ directory = mountPoint should not end with ''/'' except root directory host[uuid\:%s,\ name\:%s]\ is\ in\ status[%s],\ cannot\ perform\ required\ operation = host[uuid:{0}, name:{1}] is in status[{2}], cannot perform required operation unable\ to\ do\ the\ operation\ because\ the\ host\ is\ in\ status\ of\ Disconnected = unable to do the operation because the host is in status of Disconnected host[uuid\:%s,\ name\:%s]\ is\ in\ state[%s],\ cannot\ perform\ required\ operation = host[uuid:{0}, name:{1}] is in state[{2}], cannot perform required operation +host[%s]\ does\ not\ have\ ipmi\ device\ or\ ipmi\ does\ not\ have\ address.After\ config\ ipmi\ address,\ please\ reconnect\ host\ to\ refresh\ host\ ipmi\ information = host[{0}] does not have ipmi device or ipmi does not have address.After config ipmi address, please reconnect host to refresh host ipmi information Host[%s]\ is\ in\ maintenance\ state,\ VM\ on\ this\ host\ should\ be\ migrated = Host[{0}] is in maintenance state, VM on this host should be migrated failed\ to\ migrate\ vm[uuids\:%s]\ on\ host[uuid\:%s,\ name\:%s,\ ip\:%s],\ will\ try\ stopping\ it. = failed to migrate vm[uuids:{0}] on host[uuid:{1}, name:{2}, ip:{3}], will try stopping it. 
host\ is\ connecting,\ ping\ failed = host is connecting, ping failed @@ -555,12 +812,14 @@ cannot\ find\ host[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find host[u cluster[uuid\:%s]\ is\ not\ existing = cluster[uuid:{0}] is not existing after\ connecting,\ host[name\:%s,\ ip\:%s]\ returns\ a\ null\ architecture = after connecting, host[name:{0}, ip:{1}] returns a null architecture cluster[uuid\:%s]'s\ architecture\ is\ %s,\ not\ match\ the\ host[name\:%s,\ ip\:%s]\ architecture\ %s = cluster[uuid:{0}]''s architecture is {1}, not match the host[name:{2}, ip:{3}] architecture {4} +failed\ to\ get\ disk\ devices,\ because\ [stderr\:%s,\ stdout\:%s,\ exitErrorMessage\:%s] = failed to get disk devices, because [stderr:{0}, stdout:{1}, exitErrorMessage:{2}] mountPoint\ %s\ is\ already\ mount\ on\ device\ %s = mountPoint {0} is already mount on device {1} device\ %s\ is\ already\ mount\ on\ mountPoint\ %s = device {0} is already mount on mountPoint {1} failed\ to\ get\ UUID\ for\ device\ %s = failed to get UUID for device {0} no\ running\ api[%s]\ task\ on\ hosts = no running api[{0}] task on hosts primary\ storage[uuid\:%s]\ becomes\ disconnected,\ the\ host\ has\ no\ connected\ primary\ storage\ attached = primary storage[uuid:{0}] becomes disconnected, the host has no connected primary storage attached current\ vm\ instance\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s],\ allowed\ states\ are\ %s = current vm instance state[{0}] doesn''t allow to proceed message[{1}], allowed states are {2} +cpu\ topology\ is\ not\ correct,\ cpuNum[%s],\ configured\ cpuSockets[%s],\ cpuCores[%s],\ cpuThreads[%s];\ Calculated\ cpuSockets[%s],\ cpuCores[%s],\ cpuThreads[%s] = cpu topology is not correct, cpuNum[{0}], configured cpuSockets[{1}], cpuCores[{2}], cpuThreads[{3}]; Calculated cpuSockets[{4}], cpuCores[{5}], cpuThreads[{6}] the\ host[uuid\:%s]\ is\ not\ connected = the host[uuid:{0}] is not connected VM[uuid\:%s]\ has\ attached\ ISO[uuid\:%s] = VM[uuid:{0}] has 
attached ISO[uuid:{1}] All\ vm[uuid\:%s]\ CD-ROMs\ have\ mounted\ ISO = All vm[uuid:{0}] CD-ROMs have mounted ISO @@ -580,6 +839,7 @@ creation\ rely\ on\ image\ cache[uuid\:%s,\ locate\ ps\ uuids\:\ [%s]],\ cannot\ failed\ to\ allocate\ root\ volume\ to\ the\ primary\ storage[%s] = failed to allocate root volume to the primary storage[{0}] \ Can\ not\ find\ the\ vm's\ host,\ please\ start\ the\ vm[%s],\ then\ mount\ the\ disk = Can not find the vm''s host, please start the vm[{0}], then mount the disk null\ state\ of\ the\ vm[uuid\:%s]\ on\ the\ host[uuid\:%s] = null state of the vm[uuid:{0}] on the host[uuid:{1}] +cannot\ find\ the\ iso[uuid\:%s]\ in\ any\ connected\ backup\ storage\ attached\ to\ the\ zone[uuid\:%s].\ check\ below\:\\n1.\ if\ the\ backup\ storage\ is\ attached\ to\ the\ zone\ where\ the\ VM[name\:\ %s,\ uuid\:%s]\ is\ running\\n2.\ if\ the\ backup\ storage\ is\ in\ connected\ status,\ if\ not,\ try\ reconnecting\ it = cannot find the iso[uuid:{0}] in any connected backup storage attached to the zone[uuid:{1}]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {2}, uuid:{3}] is running\\n2. if the backup storage is in connected status, if not, try reconnecting it hostname\ is\ empty = hostname is empty %s\ is\ not\ a\ valid\ Windows\ NetBIOS\ hostname = {0} is not a valid Windows NetBIOS hostname %s\ is\ a\ reserved\ Windows\ NetBIOS\ hostname = {0} is a reserved Windows NetBIOS hostname @@ -599,6 +859,7 @@ unable\ to\ change\ to\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ already\ a unable\ to\ change\ to\ a\ non-guest\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ already\ attached\ to\ the\ vm[uuid\:\ %s] = unable to change to a non-guest L3 network. The L3 network[uuid:{0}] is already attached to the vm[uuid: {1}] unable\ to\ change\ to\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ disabled = unable to change to L3 network. 
The L3 network[uuid:{0}] is disabled unable\ to\ change\ to\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ a\ system\ network\ and\ vm\ is\ a\ user\ vm = unable to change to L3 network. The L3 network[uuid:{0}] is a system network and vm is a user vm +unable\ to\ change\ to\ L3\ network[uuid\:%s]\ whose\ l2Network\ is\ not\ attached\ to\ the\ host[uuid\:%s] = unable to change to L3 network[uuid:{0}] whose l2Network is not attached to the host[uuid:{1}] the\ image[name\:%s,\ uuid\:%s]\ is\ an\ ISO,\ rootDiskSize\ must\ be\ set = the image[name:{0}, uuid:{1}] is an ISO, rootDiskSize must be set Can\ not\ create\ CD-ROM\ for\ vm[uuid\:%s]\ which\ is\ in\ state[%s]\ = Can not create CD-ROM for vm[uuid:{0}] which is in state[{1}] Current\ platform\ %s\ not\ support\ update\ nic\ driver\ yet = Current platform {0} not support update nic driver yet @@ -620,6 +881,7 @@ ipv4\ address\ cannot\ be\ empty\ when\ l3\ is\ IPAM\ enabled = ipv4 address can ipv6\ address\ cannot\ be\ empty\ when\ l3\ is\ IPAM\ enabled = ipv6 address cannot be empty when l3 is IPAM enabled ipv6\ prefix\ must\ be\ a\ number,\ but\ got\ [%s] = ipv6 prefix must be a number, but got [{0}] the\ VM[uuid\:%s]\ has\ no\ nic\ on\ the\ L3\ network[uuid\:%s] = the VM[uuid:{0}] has no nic on the L3 network[uuid:{1}] +could\ not\ delete\ static\ ip\ [%s]\ for\ vm\ [uuid\:%s]\ because\ it\ does\ not\ exist = could not delete static ip [{0}] for vm [uuid:{1}] because it does not exist dns[%s]\ should\ be\ ipv%s\ address = dns[{0}] should be ipv{1} address size\ of\ dns\ list\ should\ not\ exceed\ 3 = size of dns list should not exceed 3 vmNicUuid\ should\ be\ set\ for\ Windows\ vm = vmNicUuid should be set for Windows vm @@ -643,6 +905,7 @@ unable\ to\ attach\ a\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ already\ at unable\ to\ attach\ a\ non-guest\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ already\ attached\ to\ the\ vm[uuid\:\ %s] = unable to attach a non-guest L3 network. 
The L3 network[uuid:{0}] is already attached to the vm[uuid: {1}] unable\ to\ attach\ a\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ disabled = unable to attach a L3 network. The L3 network[uuid:{0}] is disabled unable\ to\ attach\ a\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ a\ system\ network\ and\ vm\ is\ a\ user\ vm = unable to attach a L3 network. The L3 network[uuid:{0}] is a system network and vm is a user vm +unable\ to\ attach\ L3\ network[uuid\:%s]\ to\ VM[uuid\:%s]\ whose\ l2Network\ is\ not\ attached\ to\ the\ host[uuid\:%s] = unable to attach L3 network[uuid:{0}] to VM[uuid:{1}] whose l2Network is not attached to the host[uuid:{2}] unable\ to\ attach\ the\ nic.\ The\ vm[uuid\:\ %s]\ is\ not\ Running\ or\ Stopped;\ the\ current\ state\ is\ %s = unable to attach the nic. The vm[uuid: {0}] is not Running or Stopped; the current state is {1} unable\ to\ attach\ the\ nic.\ The\ nic\ has\ been\ attached\ with\ vm[uuid\:\ %s] = unable to attach the nic. The nic has been attached with vm[uuid: {0}] unable\ to\ attach\ the\ nic.\ Its\ L3\ network[uuid\:%s]\ is\ already\ attached\ to\ the\ vm[uuid\:\ %s] = unable to attach the nic. 
Its L3 network[uuid:{0}] is already attached to the vm[uuid: {1}] @@ -656,7 +919,6 @@ unable\ to\ detach\ a\ L3\ network.\ The\ vm[uuid\:\ %s]\ is\ not\ Running\ or\ vm[uuid\:%s]\ can\ only\ attach\ volume\ when\ state\ is\ Running\ or\ Stopped,\ current\ state\ is\ %s = vm[uuid:{0}] can only attach volume when state is Running or Stopped, current state is {1} image\ mediaType\ is\ ISO\ but\ missing\ root\ disk\ settings = image mediaType is ISO but missing root disk settings Unexpected\ root\ disk\ settings = Unexpected root disk settings -the\ primary\ storage[%s]\ of\ the\ root\ volume\ and\ the\ primary\ storage[%s]\ of\ the\ data\ volume\ are\ not\ in\ the\ same\ cluster = the primary storage[{0}] of the root volume and the primary storage[{1}] of the data volume are not in the same cluster Unexpected\ data\ disk\ settings.\ dataDiskSizes\ need\ to\ be\ greater\ than\ 0 = Unexpected data disk settings. dataDiskSizes need to be greater than 0 missing\ root\ disk = missing root disk virtio\ tag\ is\ not\ allowed\ when\ virtio\ is\ false = virtio tag is not allowed when virtio is false @@ -685,6 +947,7 @@ failed\ to\ delete\ templated\ vm\ [%s] = failed to delete templated vm [{0}] failed\ to\ delete\ the\ cache\ vmInstance[uuid\:%s]\ of\ templated\ vmInstance[uuid\:%s] = failed to delete the cache vmInstance[uuid:{0}] of templated vmInstance[uuid:{1}] VM[uuid\:%s]\ state\ is\ not\ Running. = VM[uuid:{0}] state is not Running. no\ available\ empty\ cdrom\ for\ VM[uuid\:%s] = no available empty cdrom for VM[uuid:{0}] +the\ ISO[uuid\:%s]\ is\ on\ backup\ storage\ that\ is\ not\ compatible\ of\ the\ primary\ storage[uuid\:%s]\ where\ the\ VM[name\:%s,\ uuid\:%s]\ is\ on = the ISO[uuid:{0}] is on backup storage that is not compatible of the primary storage[uuid:{1}] where the VM[name:{2}, uuid:{3}] is on failed\ to\ update\ vm[uuid\=%s]\ on\ hypervisor. = failed to update vm[uuid={0}] on hypervisor. 
Failed\ to\ update\ vm[uuid\=%s]\ on\ hypervisor\:\ The\ modification\ of\ some\ properties\ failed = Failed to update vm[uuid={0}] on hypervisor: The modification of some properties failed the\ vm\ with\ the\ name\ [%s]\ already\ exists = the vm with the name [{0}] already exists @@ -693,11 +956,15 @@ failed\ to\ update\ vm[uuid\=%s]\ on\ hypervisor\:\ The\ modification\ of\ some\ ISO[uuid\:%s]\ is\ not\ attached\ to\ VM[uuid\:%s] = ISO[uuid:{0}] is not attached to VM[uuid:{1}] Detaching\ volume\ is\ not\ allowed\ when\ VM[uuid\=%s]\ is\ in\ state[%s] = Detaching volume is not allowed when VM[uuid={0}] is in state[{1}] failed\ to\ detach\ volume[uuid\=%s]\ of\ VM[uuid\=%s] = failed to detach volume[uuid={0}] of VM[uuid={1}] +Unable\ to\ find\ L3Network[uuid\:%s]\ to\ start\ the\ current\ vm,\ it\ may\ have\ been\ deleted,\ Operation\ suggestion\:\ delete\ this\ vm,\ recreate\ a\ new\ vm = Unable to find L3Network[uuid:{0}] to start the current vm, it may have been deleted, Operation suggestion: delete this vm, recreate a new vm One\ vm\ cannot\ create\ %s\ CDROMs,\ vm\ can\ only\ add\ %s\ CDROMs = One vm cannot create {0} CDROMs, vm can only add {1} CDROMs failed\ to\ start\ VM[uuid\:%s] = failed to start VM[uuid:{0}] no\ way\ to\ get\ image\ size\ of\ %s,\ report\ exception. = no way to get image size of {0}, report exception. VM[uuid\:%s]\ can\ only\ add\ %s\ CDROMs = VM[uuid:{0}] can only add {1} CDROMs update\ vm[%s]\ priority\ to\ [%s]\ failed = update vm[{0}] priority to [{1}] failed +unable\ to\ reset\ volume[uuid\:%s]\ to\ origin\ image[uuid\:%s],\ the\ vm[uuid\:%s]\ volume\ attached\ to\ is\ not\ in\ Stopped\ state,\ current\ state\ is\ %s = unable to reset volume[uuid:{0}] to origin image[uuid:{1}], the vm[uuid:{2}] volume attached to is not in Stopped state, current state is {3} +unable\ to\ reset\ volume[uuid\:%s]\ to\ origin\ image[uuid\:%s],\ cannot\ find\ image\ cache. 
= unable to reset volume[uuid:{0}] to origin image[uuid:{1}], cannot find image cache. +unable\ to\ reset\ volume[uuid\:%s]\ to\ origin\ image[uuid\:%s],\ for\ image\ type\ is\ ISO = unable to reset volume[uuid:{0}] to origin image[uuid:{1}], for image type is ISO VmInstanceStartNewCreatedVmExtensionPoint[%s]\ refuses\ to\ create\ vm[uuid\:%s] = VmInstanceStartNewCreatedVmExtensionPoint[{0}] refuses to create vm[uuid:{1}] VmInstanceStopVmExtensionPoint[%s]\ refuses\ to\ stop\ vm[uuid\:%s] = VmInstanceStopVmExtensionPoint[{0}] refuses to stop vm[uuid:{1}] VmInstanceRebootExtensionPoint[%s]\ refuses\ to\ reboot\ vm[uuid\:%s] = VmInstanceRebootExtensionPoint[{0}] refuses to reboot vm[uuid:{1}] @@ -732,6 +999,7 @@ handle\ system\ tag\ fail\ when\ creating\ vm = handle system tag fail when crea handle\ sshkeypair\ fail\ when\ creating\ vm = handle sshkeypair fail when creating vm unable\ to\ enable\ this\ function.\ There\ are\ multi\ nics\ of\ L3\ network[uuid\:%s]\ in\ the\ vm[uuid\:\ %s] = unable to enable this function. 
There are multi nics of L3 network[uuid:{0}] in the vm[uuid: {1}] only\ one\ hostname\ system\ tag\ is\ allowed,\ but\ %s\ got = only one hostname system tag is allowed, but {0} got +conflict\ hostname\ in\ system\ tag[%s];\ there\ has\ been\ a\ VM[uuid\:%s]\ having\ hostname[%s]\ on\ L3\ network[uuid\:%s] = conflict hostname in system tag[{0}]; there has been a VM[uuid:{1}] having hostname[{2}] on L3 network[uuid:{3}] invalid\ boot\ device[%s]\ in\ boot\ order[%s] = invalid boot device[{0}] in boot order[{1}] cpuSockets\ must\ be\ an\ integer = cpuSockets must be an integer cpuCores\ must\ be\ an\ integer = cpuCores must be an integer @@ -750,8 +1018,12 @@ invalid\ securityElementEnable[%s],\ %s\ is\ not\ boolean\ class = invalid secur invalid\ usbRedirect[%s],\ %s\ is\ not\ usbRedirect\ tag = invalid usbRedirect[{0}], {1} is not usbRedirect tag invalid\ usbRedirect[%s],\ %s\ is\ not\ boolean\ class = invalid usbRedirect[{0}], {1} is not boolean class rootDiskOfferingUuid\ cannot\ be\ null\ when\ create\ vm\ without\ image = rootDiskOfferingUuid cannot be null when create vm without image +the\ resource[uuid\:%s]\ is\ a\ ROOT\ volume,\ you\ cannot\ change\ its\ owner,\ instead,change\ the\ owner\ of\ the\ VM\ the\ root\ volume\ belongs\ to = the resource[uuid:{0}] is a ROOT volume, you cannot change its owner, instead,change the owner of the VM the root volume belongs to failed\ to\ find\ host\ of\ vm[uuid\=%s] = failed to find host of vm[uuid={0}] +Failed\ to\ instantiate\ volume.\ Because\ vm's\ host[uuid\:\ %s]\ and\ allocated\ primary\ storage[uuid\:\ %s]\ is\ not\ connected. = Failed to instantiate volume. Because vm''s host[uuid: {0}] and allocated primary storage[uuid: {1}] is not connected. +the\ diskAO\ parameter\ is\ incorrect.\ need\ to\ set\ one\ of\ the\ following\ properties,\ and\ can\ only\ be\ one\ of\ them\:\ size,\ templateUuid,\ diskOfferingUuid,\ sourceUuid-sourceType = the diskAO parameter is incorrect. 
need to set one of the following properties, and can only be one of them: size, templateUuid, diskOfferingUuid, sourceUuid-sourceType the\ disk\ does\ not\ support\ attachment.\ disk\ type\ is\ %s = the disk does not support attachment. disk type is {0} +vm\ current\ state[%s],\ modify\ virtio\ requires\ the\ vm\ state[%s] = vm current state[{0}], modify virtio requires the vm state[{1}] duplicate\ nic\ params = duplicate nic params could\ not\ create\ multi\ SR-IOV\ enabled\ nics\ on\ the\ same\ l3\ network = could not create multi SR-IOV enabled nics on the same l3 network l3NetworkUuid\ of\ vm\ nic\ can\ not\ be\ null = l3NetworkUuid of vm nic can not be null @@ -805,10 +1077,12 @@ Already\ have\ one\ userdata\ systemTag\ for\ diskOffering[uuid\:\ %s]. = Alread Shouldn't\ be\ more\ than\ one\ systemTag\ for\ one\ instanceOffering. = Shouldn''t be more than one systemTag for one instanceOffering. # In Module: console +the\ console\ agent\ is\ not\ connected;\ it's\ mostly\ like\ the\ management\ node\ just\ starts,\ please\ wait\ for\ the\ console\ agent\ connected,\ or\ you\ can\ reconnect\ it\ manually\ if\ disconnected\ for\ a\ long\ time. = the console agent is not connected; it''s mostly like the management node just starts, please wait for the console agent connected, or you can reconnect it manually if disconnected for a long time. cannot\ find\ host\ IP\ of\ the\ vm[uuid\:%s],\ is\ the\ vm\ running??? = cannot find host IP of the vm[uuid:{0}], is the vm running??? 
vm[uuid\:%s]\ is\ not\ in\ state\ of\ %s,\ current\ state\ is\ %s = vm[uuid:{0}] is not in state of {1}, current state is {2} establish\ VNC\:\ unexpected\ uri\:\ %s = establish VNC: unexpected uri: {0} unable\ to\ check\ console\ proxy\ availability,\ because\ %s = unable to check console proxy availability, because {0} +console\ proxy[uuid\:\ %s,\ status\:\ %s]\ on\ agent[ip\:\ %s]\ is\ not\ Connected,\ fail\ to\ delete\ it = console proxy[uuid: {0}, status: {1}] on agent[ip: {2}] is not Connected, fail to delete it Ansible\ private\ key\ not\ found. = Ansible private key not found. invalid\ management\ node\ UUID[%s] = invalid management node UUID[{0}] there\ is\ other\ process\ using\ the\ port\:\ %s = there is other process using the port: {0} @@ -851,6 +1125,7 @@ service[%s]\ is\ not\ running = service[{0}] is not running cannot\ trigger\ a\ finished\ GC\ job[uuid\:%s,\ name\:%s] = cannot trigger a finished GC job[uuid:{0}, name:{1}] management\ node[id\:%s]\ becomes\ unavailable,\ job[name\:%s,\ id\:%s]\ is\ not\ restartable = management node[id:{0}] becomes unavailable, job[name:{1}, id:{2}] is not restartable unknown\ product\ plugin\ name\:\ %s = unknown product plugin name: {0} +plugin[%s]\ name,\ productKey\ and\ vendor\ cannot\ be\ null = plugin[{0}] name, productKey and vendor cannot be null parameter\ apiId[%s]\ is\ not\ a\ valid\ uuid. = parameter apiId[{0}] is not a valid uuid. 
http\ timeout = http timeout failed\ to\ %s\ to\ %s\:\ IO\ Error = failed to {0} to {1}: IO Error @@ -910,6 +1185,7 @@ host\ %s\ is\ not\ exists = host {0} is not exists Shell\ fail,\ because\ %s = Shell fail, because {0} add\ integrity\ file[%s.%s]\ fail,\ because\ %s = add integrity file[{0}.{1}] fail, because {2} unsupported\ operation\ for\ EncryptColumnIntegrityFactory = unsupported operation for EncryptColumnIntegrityFactory +the\ shared\ mount\ point\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters = the shared mount point primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters invalid\ certificate\ parameter\ \:\ %s\=%s = invalid certificate parameter : {0}={1} originText\ or\ certificateText\ can\ not\ be\ null = originText or certificateText can not be null the\ security\ machine\ [%s]\ does\ not\ exist = the security machine [{0}] does not exist @@ -938,6 +1214,7 @@ cannot\ find\ SecurityMachine[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot there\ has\ been\ a\ security\ machine\ having\ managementIp[%s] = there has been a security machine having managementIp[{0}] there\ is\ no\ security\ machine\ that\ can\ be\ activated = there is no security machine that can be activated invalid\ token\ type\ %s,\ only\ supports\ %s. = invalid token type {0}, only supports {1}. 
+the\ identity\ authentication\ function\ is\ enabled\ but\ the\ corresponding\ resource\ pool\ is\ not\ set,\ please\ re-enable\ the\ function\ and\ try\ again = the identity authentication function is enabled but the corresponding resource pool is not set, please re-enable the function and try again cannot\ delete\ the\ resource\ pool\ %s\ when\ in\ use = cannot delete the resource pool {0} when in use cannot\ find\ SecretResourcePool[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find SecretResourcePool[uuid:{0}], it may have been deleted failed\ to\ connect\ client = failed to connect client @@ -980,6 +1257,7 @@ no\ aliyun\ account\ found\ for\ accountUuid\:\ %s = no aliyun account found for # In Module: directory resources\ %s\ has\ already\ been\ bound\ to\ directory\ uuid[%s]\ ,\ multiple\ paths\ are\ not\ supported = resources {0} has already been bound to directory uuid[{1}] , multiple paths are not supported resource\ types\ %s\ are\ not\ supported\ by\ directory,\ allowed\ types\ are\ %s = resource types {0} are not supported by directory, allowed types are {1} +name\ contains\ unsupported\ characters,\ name\ can\ only\ contain\ Chinese\ characters,\ English\ letters,\ numbers,\ spaces,\ and\ the\ following\ characters\:\ ()()【】@._-+\ = name contains unsupported characters, name can only contain Chinese characters, English letters, numbers, spaces, and the following characters: ()()【】@._-+ circular\ dependency\ detected,\ directory\ %s\ and\ directory\ %s\ will\ cause\ circular\ dependency = circular dependency detected, directory {0} and directory {1} will cause circular dependency unable\ to\ find\ directory[uuid\=%s] = unable to find directory[uuid={0}] duplicate\ directory\ name,\ directory[uuid\:\ %s]\ with\ name\ %s\ already\ exists = duplicate directory name, directory[uuid: {0}] with name {1} already exists @@ -1025,6 +1303,7 @@ Ip\ address\ [uuid\:%s]\ is\ not\ belonged\ to\ nic\ [uuid\:%s] = Ip address [uu eip[uuid\:%s]\ has\ not\ attached\ 
to\ any\ vm\ nic = eip[uuid:{0}] has not attached to any vm nic vip\ ipVersion\ [%d]\ is\ different\ from\ guestIp\ ipVersion\ [%d]. = vip ipVersion [{0}] is different from guestIp ipVersion [{1}]. Vip[%s]\ is\ in\ the\ guest\ ip\ range\ [%s,\ %s] = Vip[{0}] is in the guest ip range [{1}, {2}] +the\ vm[uuid\:%s]\ that\ the\ EIP\ is\ about\ to\ attach\ is\ already\ on\ the\ public\ network[uuid\:%s]\ from\ which\ the\ vip[uuid\:%s,\ name\:%s,\ ip\:%s]\ comes = the vm[uuid:{0}] that the EIP is about to attach is already on the public network[uuid:{1}] from which the vip[uuid:{2}, name:{3}, ip:{4}] comes vip[uuid\:%s]\ has\ been\ occupied\ other\ network\ service\ entity[%s] = vip[uuid:{0}] has been occupied other network service entity[{1}] eip\ can\ not\ be\ created\ on\ system\ vip = eip can not be created on system vip vip[uuid\:%s]\ is\ not\ in\ state[%s],\ current\ state\ is\ %s = vip[uuid:{0}] is not in state[{1}], current state is {2} @@ -1033,6 +1312,7 @@ vm\ state[%s]\ is\ not\ allowed\ to\ operate\ eip,\ maybe\ you\ should\ wait\ th vmNic\ uuid[%s]\ is\ not\ allowed\ add\ eip,\ because\ vmNic\ exist\ portForwarding\ with\ allowedCidr\ rule = vmNic uuid[{0}] is not allowed add eip, because vmNic exist portForwarding with allowedCidr rule cannot\ find\ Eip\ guest\ ip\:\ %s\ in\ vmNic\ ips\ \:%s = cannot find Eip guest ip: {0} in vmNic ips :{1} eip\ [uuid\:%s]\ is\ deleted = eip [uuid:{0}] is deleted +unable\ to\ attach\ the\ L3\ network[uuid\:%s,\ name\:%s]\ to\ the\ vm[uuid\:%s,\ name\:%s],\ because\ the\ L3\ network\ is\ providing\ EIP\ to\ one\ of\ the\ vm's\ nic = unable to attach the L3 network[uuid:{0}, name:{1}] to the vm[uuid:{2}, name:{3}], because the L3 network is providing EIP to one of the vm''s nic # In Module: expon expon\ request\ failed,\ code\ %s,\ message\:\ %s. = expon request failed, code {0}, message: {1}. 
@@ -1067,11 +1347,16 @@ unknown\ value\ type\ %s,\ key\ \=\ %s = unknown value type {0}, key = {1} failed\ to\ HTTP\ call\ all\ prometheus\ instances = failed to HTTP call all prometheus instances # In Module: faulttolerance +pvm[uuid\:%s]\ and\ svm[uuid\:%s]\ volume\ number\ not\ matches,\ do\ not\ allowed\ to\ start = pvm[uuid:{0}] and svm[uuid:{1}] volume number not matches, do not allowed to start +volume\ with\ index\:\ %d,\ of\ pvm[uuid\:%s]\ and\ svm[uuid\:%s]\ have\ different\ size,\ do\ not\ allowed\ to\ start = volume with index: {0}, of pvm[uuid:{1}] and svm[uuid:{2}] have different size, do not allowed to start +volume\ with\ index\:\ %d,\ of\ pvm[uuid\:%s]\ and\ svm[uuid\:%s]'s\ cache\ volume\ have\ different\ size,\ do\ not\ allowed\ to\ start = volume with index: {0}, of pvm[uuid:{1}] and svm[uuid:{2}]''s cache volume have different size, do not allowed to start image[uuid\:%s]\ is\ still\ used\ by\ fault\ tolerance\ vm[uuid\:%s] = image[uuid:{0}] is still used by fault tolerance vm[uuid:{1}] +could\ not\ delete\ l3\ network[uuid\:%s].\ Fault\ tolerance\ vm[%s]\ in\ states[%s,\ %s]\ still\ using\ it.\ Stop\ related\ fault\ tolerance\ vms\ before\ delete\ l3\ network = could not delete l3 network[uuid:{0}]. Fault tolerance vm[{1}] in states[{2}, {3}] still using it. 
Stop related fault tolerance vms before delete l3 network Can\ not\ fail-over\ vm[uuid\:%s],\ please\ enable\ ft\ in\ GlobalConfig = Can not fail-over vm[uuid:{0}], please enable ft in GlobalConfig Can\ not\ fail-over\ vm[uuid\:%s],\ please\ confirm\ it\ is\ a\ fault\ tolerance\ vm\ group = Can not fail-over vm[uuid:{0}], please confirm it is a fault tolerance vm group Can\ not\ fail-over\ vm[uuid\:%s],\ because\ fault\ tolerance\ vm\ group\ is\ not\ in\ status\ of\ [%s,\ %s] = Can not fail-over vm[uuid:{0}], because fault tolerance vm group is not in status of [{1}, {2}] Can\ not\ maintain\ host,\ because\ ft\ vms[%s]\ are\ under\ recovering = Can not maintain host, because ft vms[{0}] are under recovering +current\ operation[api\:%s]\ is\ not\ supported\ when\ ft\ vm[uuid\:%s,\ state\:%s]\ is\ not\ stopped = current operation[api:{0}] is not supported when ft vm[uuid:{1}, state:{2}] is not stopped Can\ not\ set\ vm\ level\ to\ %s,\ please\ enable\ ft\ in\ GlobalConfig = Can not set vm level to {0}, please enable ft in GlobalConfig can\ not\ update\ ft\ vm[uuid\:%s]\ cpu\ number,\ need\ to\ stop\ both\ of\ the\ vms = can not update ft vm[uuid:{0}] cpu number, need to stop both of the vms can\ not\ update\ ft\ vm[uuid\:%s]\ memory\ size,\ need\ to\ stop\ both\ of\ the\ vms = can not update ft vm[uuid:{0}] memory size, need to stop both of the vms @@ -1093,6 +1378,7 @@ can\ not\ start\ secondary\ vm,\ because\ primary\ vm\ is\ still\ stopped = can Can\ not\ migrate\ ft\ secondary\ vm[uuid\:%s] = Can not migrate ft secondary vm[uuid:{0}] Can\ not\ migrate\ ft\ primary\ vm[uuid\:%s] = Can not migrate ft primary vm[uuid:{0}] Current\ ft\ vm\ is\ in\ unknown\ status,\ can\ not\ stop\ it,\ please\ try\ to\ fail-over\ it\ manually = Current ft vm is in unknown status, can not stop it, please try to fail-over it manually +unable\ to\ start\ the\ vm[uuid\:%s].\ It\ doesn't\ have\ any\ nic,\ please\ attach\ a\ nic\ and\ try\ again = unable to start the vm[uuid:{0}]. 
It doesn''t have any nic, please attach a nic and try again an\ other\ fault\ tolerance\ gc\ task\ is\ running,\ cancel\ the\ new\ task\ and\ wait\ return = an other fault tolerance gc task is running, cancel the new task and wait return can\ not\ create\ secondary\ vm,\ because\ primary\ vm\ is\ stopped = can not create secondary vm, because primary vm is stopped created\ svm\ found,\ report\ error\ for\ this\ start\ secondary\ vm\ request = created svm found, report error for this start secondary vm request @@ -1101,6 +1387,7 @@ pvm[uuid\:%s]\ not\ exists = pvm[uuid:{0}] not exists could\ not\ failover.\ Primary\ vm\ is\ unknown\ but\ no\ fault\ tolerance\ network\ address\ available = could not failover. Primary vm is unknown but no fault tolerance network address available could\ not\ failover.\ Secondary\ vm\ is\ unknown\ but\ no\ fault\ tolerance\ network\ address\ available = could not failover. Secondary vm is unknown but no fault tolerance network address available unexpected\ exception = unexpected exception +cannot\ found\ available\ ip\ from\ current\ ft\ network.\ Check\ whether\ global\ config[category\:ft\ name\:fault.tolerance.network.cidr]\ is\ correctly\ set,\ and\ confirm\ that\ host[uuid\:%s]\ own\ ip\ address\ in\ the\ CIDR = cannot found available ip from current ft network. 
Check whether global config[category:ft name:fault.tolerance.network.cidr] is correctly set, and confirm that host[uuid:{0}] own ip address in the CIDR can\ not\ start\ secondary\ vm,\ because\ primary\ vm\ is\ stopped = can not start secondary vm, because primary vm is stopped not\ fault\ tolerance\ vm\ port\ found = not fault tolerance vm port found failed\ to\ allocate\ port\ of\ nic[uuid\:\ %s]\ on\ host[uuid\:\ %s] = failed to allocate port of nic[uuid: {0}] on host[uuid: {1}] @@ -1123,6 +1410,7 @@ DHCP\ server\ ip\ [%s]\ is\ not\ a\ IPv6\ address = DHCP server ip [{0}] is not DHCP\ server\ ip\ [%s]\ is\ already\ existed\ in\ l3\ network\ [%s] = DHCP server ip [{0}] is already existed in l3 network [{1}] DHCP\ server\ ip\ [%s]\ can\ not\ be\ equaled\ to\ gateway\ ip = DHCP server ip [{0}] can not be equaled to gateway ip DHCP\ server\ ip\ [%s]\ can\ not\ be\ configured\ to\ system\ l3 = DHCP server ip [{0}] can not be configured to system l3 +could\ not\ delete\ ip\ address,\ because\ ip\ [%s]\ is\ dhcp\ server\ ip = could not delete ip address, because ip [{0}] is dhcp server ip could\ not\ set\ dhcp\ v4\ server\ ip,\ because\ there\ is\ no\ ipv4\ range = could not set dhcp v4 server ip, because there is no ipv4 range could\ not\ set\ dhcp\ v4\ server\ ip,\ because\ ip[%s]\ is\ not\ the\ cidr\ of\ l3\ [%s] = could not set dhcp v4 server ip, because ip[{0}] is not the cidr of l3 [{1}] could\ not\ set\ dhcp\ v6\ server\ ip,\ because\ there\ is\ no\ ipv6\ range = could not set dhcp v6 server ip, because there is no ipv6 range @@ -1140,6 +1428,7 @@ failed\ to\ allocate\ DHCP\ server\ IP\ for\ L3\ network[uuid\:%s] = failed to a cannot\ find\ bridge\ name\ for\ L3\ network[%s] = cannot find bridge name for L3 network[{0}] could\ not\ attach\ eip\ because\ there\ is\ no\ gateway\ for\ nic[uuid\:%s] = could not attach eip because there is no gateway for nic[uuid:{0}] could\ not\ attach\ eip\ because\ ipv6\ eip\ can\ ONLY\ be\ attached\ to\ flat\ network = could not 
attach eip because ipv6 eip can ONLY be attached to flat network +L2Network\ where\ vip's\ L3Network\ based\ hasn't\ attached\ the\ cluster\ where\ vmNic[uuid\:%s]\ located = L2Network where vip''s L3Network based hasn''t attached the cluster where vmNic[uuid:{0}] located can\ not\ bound\ more\ than\ 1\ %s\ eip\ to\ a\ vm\ nic[uuid\:%s]\ of\ flat\ = can not bound more than 1 {0} eip to a vm nic[uuid:{1}] of flat unable\ to\ apply\ the\ EIP\ operation\ for\ the\ the\ vm[uuid\:%s,\ state\:%s],\ because\ cannot\ find\ the\ VM's\ hostUUid = unable to apply the EIP operation for the the vm[uuid:{0}, state:{1}], because cannot find the VM''s hostUUid host[uuid\:%s]\ is\ not\ connected = host[uuid:{0}] is not connected @@ -1220,8 +1509,6 @@ Incorrect\ %s\ settings,\ valid\ value\ is\ %s = Incorrect {0} settings, valid v cannot\ connect\ to\ [%s]\ in\ %d\ milliseconds,\ so\ aliyun\ openapi\ is\ unreachable. = cannot connect to [{0}] in {1} milliseconds, so aliyun openapi is unreachable. [%s,\ %s]\ not\ a\ valid\ ak\ pair,\ please\ check\ it.\ more\ details\:\ %s = [{0}, {1}] not a valid ak pair, please check it. more details: {2} no\ bucket\ found\ for\ backup = no bucket found for backup -accessKey\ and\ keySecret\ must\ be\ set! = accessKey and keySecret must be set! -regionId\ must\ be\ set! = regionId must be set! cannot\ find\ key\ /\ secret\ from\ msg = cannot find key / secret from msg no\ such\ instance\ type\ support\:\ %s = no such instance type support: {0} couldn't\ find\ router\ table\ in\ router\:\ [%s] = couldn''t find router table in router: [{0}] @@ -1240,6 +1527,7 @@ ecs\ image\ existed\ remote,\ name\:\ %s,\ created\ time\:\ %s = ecs image exist no\ such\ instance-offering\ uuid = no such instance-offering uuid mem\ must\ \\\\>\ 1G,\ and\ mem\ GB\ must\ \\\\>\=\ cpu = mem must \\\\> 1G, and mem GB must \\\\>= cpu No\ Available\ instance\ types\ now. = No Available instance types now. 
+This\ region\ [%s]\ cannot\ produce\ instance\ type\ [%s]\ now,\ please\ select\ another\ instance\ type\ or\ another\ region = This region [{0}] cannot produce instance type [{1}] now, please select another instance type or another region no\ system\ disk\ found\ for\ ecs\:\ [%s],\ ecs\ id\ is\:\ [%s] = no system disk found for ecs: [{0}], ecs id is: [{1}] Only\ delete\ ecs\ which\ status\ is\ running\ or\ stopped,\ now\ is\ %s = Only delete ecs which status is running or stopped, now is {0} Only\ postpaid\ ecs\ support\ delete\ remote,\ the\ indicate\ ecs\ charge\ type\ is\:\ %s = Only postpaid ecs support delete remote, the indicate ecs charge type is: {0} @@ -1263,14 +1551,19 @@ Only\ support\ ImageStoreBackupStorage = Only support ImageStoreBackupStorage image\ name\ cannot\ starts\ with\ http\://\ or\ https\:// = image name cannot starts with http:// or https:// no\ backup\ storage\ found\ for\ imageUuid\:\ %s = no backup storage found for imageUuid: {0} exceeded\ backup\ storage\ found\ for\ the\ imageUuid\:\ %s,\ please\ indicate\ it\ manually = exceeded backup storage found for the imageUuid: {0}, please indicate it manually +valid\ platform\:[%s]\ for\ aliyun\ image\ import,\ valid\ value\ are\:\ [%s] = valid platform:[{0}] for aliyun image import, valid value are: [{1}] image\ [%s]\ is\ not\ enable\ now = image [{0}] is not enable now the\ indicated\ image\ [%s]\ is\ importing\ to\ datacenter\ [%s]\ now... = the indicated image [{0}] is importing to datacenter [{1}] now... ecs\ instance[%s]\ isn't\ existed,\ please\ check\ it. = ecs instance[{0}] isn''t existed, please check it. 
+Only\ ecs\ instances\ that\ are\ in\ the\ running\ and\ stopped\ status\ can\ detach\ the\ eip\ ,\ but\ the\ ecs\ [%s]\ status\ is\ [%s]\ now\ = Only ecs instances that are in the running and stopped status can detach the eip , but the ecs [{0}] status is [{1}] now virtual\ border\:\ %s\ has\ been\ deleted = virtual border: {0} has been deleted couldn't\ find\ such\ router\ interface\:\ [%s] = couldn''t find such router interface: [{0}] destination\ cidr\ [%s]\ is\ existed\ and\ point\ to\ another\ instance-id\ [%s],\ please\ check\ or\ delete\ it\ first = destination cidr [{0}] is existed and point to another instance-id [{1}], please check or delete it first couldn't\ find\ such\ vr\ entry\:\ [%s] = couldn''t find such vr entry: [{0}] +Only\ esc\ instances\ that\ are\ in\ the\ running\ and\ stopped\ status\ can\ attach\ the\ eip\ ,\ but\ the\ ecs\ [%s]\ status\ is\ [%s]\ now\ = Only esc instances that are in the running and stopped status can attach the eip , but the ecs [{0}] status is [{1}] now Vbr\:\ [%s]\ is\ in\ create\ connection\ progress,\ please\ wait... = Vbr: [{0}] is in create connection progress, please wait... +custom\ cidr\ [%s]\ is\ already\ existed\ in\ vbr\ [%s],\ it\ is\ overlapped\ with\ target\ cidr\ [%s],\ please\ check\ and\ delete\ it\ first. = custom cidr [{0}] is already existed in vbr [{1}], it is overlapped with target cidr [{2}], please check and delete it first. +custom\ cidr\ [%s]\ is\ already\ existed\ in\ vrouter\ [%s],\ it\ is\ overlapped\ with\ target\ cidr\ [%s],\ please\ check\ and\ delete\ it\ first. = custom cidr [{0}] is already existed in vrouter [{1}], it is overlapped with target cidr [{2}], please check and delete it first. No\ Such\ VRouter\ nic\ found\ for\ l3network\:\ %s = No Such VRouter nic found for l3network: {0} No\ Such\ Cidr\ found\ for\ l3network\:\ %s = No Such Cidr found for l3network: {0} No\ Such\ Ecs\ VPC\ found\:\ %s = No Such Ecs VPC found: {0} @@ -1293,6 +1586,8 @@ OssBucket[%s]\ is\ not\ attached. 
= OssBucket[{0}] is not attached. domain,\ key,\ secret\ must\ be\ set\ all = domain, key, secret must be set all oss\ bucket\ is\ not\ empty! = oss bucket is not empty! Root\ volume\ cannot\ be\ deleted = Root volume cannot be deleted +Cannot\ set\ the\ disk's\ deleteWithInstance\ property\ to\ false\ when\ the\ category\ property\ of\ the\ disk\ is\ ephemeral = Cannot set the disk''s deleteWithInstance property to false when the category property of the disk is ephemeral +Cannot\ set\ the\ disk's\ deleteWithInstance\ property\ to\ false\ when\ the\ category\ property\ of\ the\ disk\ is\ cloud\ and\ portable\ property\ is\ false = Cannot set the disk''s deleteWithInstance property to false when the category property of the disk is cloud and portable property is false The\ disk\ [%s]\ is\ not\ attach\ on\ any\ instance\ = The disk [{0}] is not attach on any instance Only\ data\ disk\ can\ be\ mounted\ on\ ecs = Only data disk can be mounted on ecs The\ disk\ not\ be\ attach\ on\ any\ ecs = The disk not be attach on any ecs @@ -1302,6 +1597,7 @@ The\ disk\ [%s]\ is\ already\ mounted\ on\ the\ instance\ [%s] = The disk [{0}] Only\ data\ disk\ can\ attach\ to\ ecs = Only data disk can attach to ecs Cannot\ attach\ disk\ when\ in\ use = Cannot attach disk when in use Non-independent\ disk\ can\ only\ be\ destroyed\ with\ instances = Non-independent disk can only be destroyed with instances +The\ size\ and\ snapshot\ id\ in\ the\ request\ parameter\ must\ select\ one\ of\ the\ items\ to\ specify\ the\ size\ of\ the\ disk\ or\ create\ a\ disk\ using\ the\ snapshot. = The size and snapshot id in the request parameter must select one of the items to specify the size of the disk or create a disk using the snapshot. 
Not\ allowed\ create\ disk\ on\ root\ volume\ snapshot = Not allowed create disk on root volume snapshot the\ disk\ name\ or\ description\ cannot\ set\ start\ with\ 'http\://'\ or\ 'https\://'\ = the disk name or description cannot set start with ''http://'' or ''https://'' The\ operation\ allows\ only\ when\ ecs\ state\ of\ the\ ecs\ instance\ status\ be\ running\ or\ stopped = The operation allows only when ecs state of the ecs instance status be running or stopped @@ -1315,7 +1611,6 @@ couldn't\ find\ such\ datacenter\:\ [%s] = couldn''t find such datacenter: [{0}] couldn't\ find\ such\ identityzone\:\ [%s] = couldn''t find such identityzone: [{0}] couldn't\ find\ such\ vpc\:\ [%s] = couldn''t find such vpc: [{0}] couldn't\ find\ such\ vswitch\:\ [%s] = couldn''t find such vswitch: [{0}] -couldn't\ find\ such\ oss\ bucket\:\ [%s] = couldn''t find such oss bucket: [{0}] couldn't\ find\ such\ virtual\ border\ router\:\ [%s] = couldn''t find such virtual border router: [{0}] couldn't\ find\ such\ virtual\ router\:\ [%s] = couldn''t find such virtual router: [{0}] non\ support\ virtual\ router\ type\:\ [%s] = non support virtual router type: [{0}] @@ -1326,7 +1621,6 @@ couldn't\ find\ such\ virtual\ router\:\ %s = couldn''t find such virtual router non\ supported\ virtual\ router\ type\:\ %s = non supported virtual router type: {0} couldn't\ find\ such\ virtual\ router\ from\ vpcUuid\:\ %s = couldn''t find such virtual router from vpcUuid: {0} no\ current\ used\ key/secret\ for\ aliyun! = no current used key/secret for aliyun! -no\ current\ used\ key/secret\ for\ %s! = no current used key/secret for {0}! dcType\ not\ supported\ type\ [%s] = dcType not supported type [{0}] regionId\ [%s]\ already\ created\ by\ ak\ [%s] = regionId [{0}] already created by ak [{1}] DataCenter\ [%s]\ is\ still\ in\ sync\ progress,\ please\ wait. = DataCenter [{0}] is still in sync progress, please wait. 
@@ -1351,6 +1645,7 @@ next\ hop\ type\ [%s]\ not\ supported\ create\ route\ entry\ now! = next hop typ virtual\ border\ router\ only\ support\ routerinterface\ as\ next\ hop\ type = virtual border router only support routerinterface as next hop type vswitch's\ cidr\ [%s]\ not\ in\ the\ vpc's\ [%s] = vswitch''s cidr [{0}] not in the vpc''s [{1}] cidr\ is\ overlap\ by\ another\ vswitch\:\ %s = cidr is overlap by another vswitch: {0} +invalid\ CidrBlock\:\ %s,\ which\ must\ subnet\ in\ '10.0.0.0/8',\ '172.16.0.0/12',\ '192.168.0.0/16' = invalid CidrBlock: {0}, which must subnet in ''10.0.0.0/8'', ''172.16.0.0/12'', ''192.168.0.0/16'' no\ such\ virtual\ border\ router\:\ %s = no such virtual border router: {0} no\ such\ virtual\ router\:\ %s = no such virtual router: {0} localGateway\ is\ not\ IPv4\:\ %s = localGateway is not IPv4: {0} @@ -1369,6 +1664,8 @@ remoteCidr\ must\ be\ Cidr! = remoteCidr must be Cidr! localCidr\ and\ remoteCidr\ must\ be\ Cidr! = localCidr and remoteCidr must be Cidr! vpngateway\ [%s]\ existed,\ cannot\ delete\ remote = vpngateway [{0}] existed, cannot delete remote +# In Module: i18n-tools + # In Module: iam1 AccountGroup[uuid\:%s,\ name\:%s]\ has\ been\ deleted = AccountGroup[uuid:{0}, name:{1}] has been deleted failed\ to\ move\ account\ group[uuid\:%s]\ to\ it\ self = failed to move account group[uuid:{0}] to it self @@ -1396,6 +1693,7 @@ organization[%s]\ is\ repeated.\ = organization[{0}] is repeated. project[%s]\ is\ not\ exist.\ = project[{0}] is not exist. fail\ to\ build\ VirtualID\ info\ from\ file.\ = fail to build VirtualID info from file. 
virtualID[uuid\:%s]\ not\ in\ project[uuid\:%s] = virtualID[uuid:{0}] not in project[uuid:{1}] +Can\ not\ do\ operations,\ because\ current\ organization[uuid\:%s]\ is\ staled,\ please\ enable\ it = Can not do operations, because current organization[uuid:{0}] is stale, please enable it organization[uuid\:%s]\ is\ parent\ of\ the\ organization[uuid\:%s],\ cannot\ set\ it\ as\ a\ child\ organization = organization[uuid:{0}] is parent of the organization[uuid:{1}], cannot set it as a child organization the\ project[uuid\:\ %s,\ name\:%s]\ is\ in\ state\ of\ %s\ which\ disallows\ the\ operation[%s] = the project[uuid: {0}, name:{1}] is in state of {2} which disallows the operation[{3}] can\ not\ parse\ the\ cron\ expression = can not parse the cron expression @@ -1405,6 +1703,7 @@ wrong\ virtual\ ID[uuid\:%s],\ not\ existing\ or\ wrong\ password = wrong virtua virtual\ ID[name\:%s]\ is\ disabled = virtual ID[name:{0}] is disabled virtual\ ID[name\:%s]\ not\ belonging\ to\ the\ project[name\:%s] = virtual ID[name:{0}] not belonging to the project[name:{1}] the\ quota[name\:%s]\ of\ Account[uuid\:%s]\ can\ not\ be\ %d,\ otherwise\ it\ will\ exceeds\ the\ quota\ of\ organization[uuid\:%s] = the quota[name:{0}] of Account[uuid:{1}] can not be {2}, otherwise it will exceed the quota of organization[uuid:{3}] +Can\ not\ do\ operations,\ because\ Current\ virtualID[uuid\:%s]\ is\ staled,\ please\ enable\ it = Can not do operations, because current virtualID[uuid:{0}] is stale, please enable it only\ admin\ and\ the\ virtual\ ID\ itself\ can\ do\ the\ update = only admin and the virtual ID itself can do the update old\ password\ is\ not\ equal\ to\ the\ original\ password,\ cannot\ update\ the\ password\ of\ virtual\ ID[uuid\:%s] = old password is not equal to the original password, cannot update the password of virtual ID[uuid:{0}] attribute\ name\ cannot\ be\ null,\ value[%s] = attribute name cannot be null, value[{0}] @@ -1424,6 +1723,7 @@ retire\ policy\ must\ be\ 
deleted\ before\ pull\ the\ project\ out\ of\ Retired\ login\ is\ prohibited\ because\ the\ project\ is\ in\ state\ of\ %s = login is prohibited because the project is in state of {0} no\ quota[name\:%s]\ found = no quota[name:{0}] found organization[uuid\:%s]\ is\ a\ Company\ that\ cannot\ have\ parent\ organization = organization[uuid:{0}] is a Company that cannot have parent organization +parent\ organization[uuid\:%s]\ cannot\ be\ a\ child\ organization[uuid\:%s]\ of\ a\ childOrganization = parent organization[uuid:{0}] cannot be a child organization[uuid:{1}] of a childOrganization duplicate\ virtualID\ name[%s] = duplicate virtualID name[{0}] duplicate\ project\ name[%s] = duplicate project name[{0}] invalid\ project\ name[%s],\ an\ account\ or\ project\ with\ the\ same\ name\ exists = invalid project name[{0}], an account or project with the same name exists @@ -1449,6 +1749,8 @@ invalid\ time[%s],\ it\ should\ be\ in\ format\ of\ for\ example\ 10m,\ 1h,\ 2d invalid\ spending\ value[%s],\ spending\ value\ should\ between\ 0\ and\ %f = invalid spending value[{0}], spending value should between 0 and {1} invalid\ spending\ value[%s],\ it\ should\ be\ in\ format\ of\ for\ example\ 10.001 = invalid spending value[{0}], it should be in format of for example 10.001 invalid\ date\ or\ time[%s],\ it\ cannot\ be\ before\ current\ time[%s] = invalid date or time[{0}], it cannot be before current time[{1}] +virtual\ ID[uuid\:%s]\ already\ has\ admin\ related\ attributes,\ can\ not\ add\ %s = virtual ID[uuid:{0}] already has admin related attributes, can not add {1} +organiztion\ ID[uuid\:%s]\ already\ has\ opoeration\ attributes,\ can\ not\ add\ %s = organization ID[uuid:{0}] already has operation attributes, can not add {1} virtual\ id[uuid\:%s]\ already\ has\ a\ project\ operator\ attribute = virtual id[uuid:{0}] already has a project operator attribute cannot\ find\ zone[uuid\:%s] = cannot find zone[uuid:{0}] project[uuid\:%s]\ already\ has\ a\ project\ admin = 
project[uuid:{0}] already has a project admin @@ -1464,6 +1766,7 @@ failed\ to\ login\:\ account\ is\ disabled = failed to login: account is disable wrong\ account\ name\ or\ password = wrong account name or password cannot\ find\ the\ resource[uuid\:%s];\ wrong\ resourceUuid\ or\ the\ resource\ is\ admin\ resource = cannot find the resource[uuid:{0}]; wrong resourceUuid or the resource is admin resource unable\ to\ find\ account[uuid\=%s] = unable to find account[uuid={0}] +Invalid\ ChangeResourceOwner\ operation.Original\ owner\ is\ the\ same\ as\ target\ owner.Current\ account\ is\ [uuid\:\ %s].The\ resource\ target\ owner\ account[uuid\:\ %s].The\ resource\ original\ owner\ account[uuid\:%s]. = Invalid ChangeResourceOwner operation.Original owner is the same as target owner.Current account is [uuid: {0}].The resource target owner account[uuid: {1}].The resource original owner account[uuid:{2}]. cannot\ find\ the\ account[uuid\:%s] = cannot find the account[uuid:{0}] unable\ to\ create\ an\ account.\ An\ account\ already\ called\ %s = unable to create an account. An account already called {0} account\ cannot\ delete\ itself = account cannot delete itself @@ -1485,7 +1788,9 @@ the\ account[uuid\:%s]\ used\ [name\:%s,\ usedValue\:%s]\ exceeds\ request\ quot session\ of\ message[%s]\ is\ null = session of message[{0}] is null session\ uuid\ is\ null = session uuid is null additional\ authentication\ required = additional authentication required +quota\ exceeding.The\ resource\ owner(or\ target\ resource\ owner)\ account[uuid\:\ %s\ name\:\ %s]\ exceeds\ a\ quota[name\:\ %s,\ value\:\ %s],\ Current\ used\:%s,\ Request\:%s.\ Please\ contact\ the\ administrator. = quota exceeding. The resource owner(or target resource owner) account[uuid: {0} name: {1}] exceeds a quota[name: {2}, value: {3}], Current used:{4}, Request:{5}. Please contact the administrator. 
quota\ exceeding.\ The\ account[uuid\:\ %s]\ exceeds\ a\ quota[name\:\ %s,\ value\:\ %s].\ Please\ contact\ the\ administrator. = quota exceeding. The account[uuid: {0}] exceeds a quota[name: {1}, value: {2}]. Please contact the administrator. +quota\ exceeding.\ The\ account[uuid\:\ %s]\ exceeds\ a\ quota[name\:\ %s,\ value\:\ %s],\ Current\ used\:%s,\ Request\:%s.\ Please\ contact\ the\ administrator. = quota exceeding. The account[uuid: {0}] exceeds a quota[name: {1}, value: {2}], Current used:{3}, Request:{4}. Please contact the administrator. Login\ sessions\ hit\ limit\ of\ max\ allowed\ concurrent\ login\ sessions = Login sessions hit limit of max allowed concurrent login sessions Session\ expired = Session expired unsupported\ login\ type\ %s = unsupported login type {0} @@ -1505,6 +1810,7 @@ account[uuid\:%s]\ has\ no\ access\ to\ resources\ with\ owner-only\ scope\:\ %s # In Module: image Failed\ because\ management\ node\ restarted. = Failed because management node restarted. +the\ backup\ storage[uuid\:%s]\ is\ not\ in\ status\ of\ Connected,\ current\ status\ is\ %s = the backup storage[uuid:{0}] is not in status of Connected, current status is {1} The\ aarch64\ architecture\ does\ not\ support\ legacy. = The aarch64 architecture does not support legacy. volume[uuid\:%s]\ is\ not\ Ready,\ it's\ %s = volume[uuid:{0}] is not Ready, it''s {1} volume[uuid\:%s]\ is\ not\ Enabled,\ it's\ %s = volume[uuid:{0}] is not Enabled, it''s {1} @@ -1529,7 +1835,6 @@ the\ image[uuid\:%s,\ name\:%s]\ is\ not\ deleted\ on\ the\ backup\ storage[uuid Cannot\ find\ image[uuid\:%s],\ it\ may\ have\ been\ deleted = Cannot find image[uuid:{0}], it may have been deleted Failed\ to\ download\ image[name\:%s]\ on\ all\ backup\ storage%s. = Failed to download image[name:{0}] on all backup storage{1}. 
unable\ to\ allocate\ backup\ storage\ specified\ by\ uuids%s,\ list\ errors\ are\:\ %s = unable to allocate backup storage specified by uuids{0}, list errors are: {1} -image\ [uuid\:%s]\ has\ been\ deleted = image [uuid:{0}] has been deleted failed\ to\ create\ image\ from\ root\ volume[uuid\:%s]\ on\ all\ backup\ storage,\ see\ cause\ for\ one\ of\ errors = failed to create image from root volume[uuid:{0}] on all backup storage, see cause for one of errors cannot\ find\ proper\ backup\ storage = cannot find proper backup storage failed\ to\ allocate\ all\ backup\ storage[uuid\:%s],\ a\ list\ of\ error\:\ %s = failed to allocate all backup storage[uuid:{0}], a list of error: {1} @@ -1575,11 +1880,17 @@ failed\ to\ increase\ vm\ cpu,\ error\ details\:\ %s = failed to increase vm cpu unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:%d]\ to\ check\ DNS = unable to connect to KVM[ip:{0}, username:{1}, sshPort:{2}] to do DNS check, please check if username/password is wrong the\ host[uuid\:%s,\ status\:%s]\ is\ not\ Connected = the host[uuid:{0}, status:{1}] is not Connected cannot\ do\ the\ operation\ on\ the\ KVM\ host = cannot do the operation on the KVM host +cannot\ do\ volume\ snapshot\ merge\ when\ vm[uuid\:%s]\ is\ in\ state\ of\ %s.\ The\ operation\ is\ only\ allowed\ when\ vm\ is\ Running\ or\ Stopped = cannot do volume snapshot merge when vm[uuid:{0}] is in state of {1}. The operation is only allowed when vm is Running or Stopped +live\ volume\ snapshot\ merge\ needs\ libvirt\ version\ greater\ than\ %s,\ current\ libvirt\ version\ is\ %s.\ Please\ stop\ vm\ and\ redo\ the\ operation\ or\ detach\ the\ volume\ if\ it's\ data\ volume = live volume snapshot merge needs libvirt version greater than {0}, current libvirt version is {1}. 
Please stop vm and redo the operation or detach the volume if it''s data volume vm[uuid\:%s]\ is\ not\ Running\ or\ Stopped,\ current\ state[%s] = vm[uuid:{0}] is not Running or Stopped, current state[{1}] kvm\ host[uuid\:%s,\ name\:%s,\ ip\:%s]\ doesn't\ not\ support\ live\ snapshot.\ please\ stop\ vm[uuid\:%s]\ and\ try\ again = kvm host[uuid:{0}, name:{1}, ip:{2}] doesn''t support live snapshot. please stop vm[uuid:{3}] and try again failed\ to\ migrate\ VM = failed to migrate VM +failed\ to\ update\ nic[vm\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],because\ %s = failed to update nic[vm:{0}] on kvm host[uuid:{1}, ip:{2}], because {3} +failed\ to\ attach\ nic[uuid\:%s,\ vm\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],because\ %s,\ please\ try\ again\ or\ delete\ device[%s]\ by\ yourself = failed to attach nic[uuid:{0}, vm:{1}] on kvm host[uuid:{2}, ip:{3}], because {4}, please try again or delete device[{5}] by yourself +failed\ to\ attach\ nic[uuid\:%s,\ vm\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],because\ %s = failed to attach nic[uuid:{0}, vm:{1}] on kvm host[uuid:{2}, ip:{3}], because {4} failed\ to\ detach\ data\ volume[uuid\:%s,\ installPath\:%s]\ from\ vm[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = failed to detach data volume[uuid:{0}, installPath:{1}] from vm[uuid:{2}, name:{3}] on kvm host[uuid:{4}, ip:{5}], because {6} In\ the\ hypervisorType[%s],\ attach\ volume\ is\ not\ allowed\ in\ the\ current\ vm\ instance\ state[%s]. = In the hypervisorType[{0}], attach volume is not allowed in the current vm instance state[{1}]. 
+failed\ to\ attach\ data\ volume[uuid\:%s,\ installPath\:%s]\ to\ vm[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = failed to attach data volume[uuid:{0}, installPath:{1}] to vm[uuid:{2}, name:{3}] on kvm host[uuid:{4}, ip:{5}], because {6} failed\ to\ destroy\ vm[uuid\:%s\ name\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = failed to destroy vm[uuid:{0} name:{1}] on kvm host[uuid:{2}, ip:{3}], because {4} unable\ to\ destroy\ vm[uuid\:%s,\ \ name\:%s]\ on\ kvm\ host\ [uuid\:%s,\ ip\:%s],\ because\ %s = unable to destroy vm[uuid:{0}, name:{1}] on kvm host [uuid:{2}, ip:{3}], because {4} unable\ to\ destroy\ a\ vm = unable to destroy a vm @@ -1598,6 +1909,7 @@ host\ %s\ is\ not\ managed\ by\ current\ mn\ node = host {0} is not managed by c host\ %s\ is\ not\ connected,\ skip\ to\ restart\ kvmagent = host {0} is not connected, skip to restart kvmagent running\ task\ exists\ on\ host\ %s = running task exists on host {0} failed\ to\ get\ local\ running\ tasks\ in\ some\ MN = failed to get local running tasks in some MN +detected\ abnormal\ status[host\ uuid\ change,\ expected\:\ %s\ but\:\ %s\ or\ agent\ version\ change,\ expected\:\ %s\ but\:\ %s]\ of\ kvmagent,it's\ mainly\ caused\ by\ kvmagent\ restarts\ behind\ zstack\ management\ server.\ Report\ this\ to\ ping\ task,\ it\ will\ issue\ a\ reconnect\ soon = detected abnormal status[host uuid change, expected: {0} but: {1} or agent version change, expected: {2} but: {3}] of kvmagent,it''s mainly caused by kvmagent restarts behind zstack management server. 
Report this to ping task, it will issue a reconnect soon unable\ to\ connect\ to\ kvm\ host[uuid\:%s,\ ip\:%s,\ url\:%s],\ because\ %s = unable to connect to kvm host[uuid:{0}, ip:{1}, url:{2}], because {3} host\ can\ not\ access\ any\ primary\ storage = host can not access any primary storage connection\ error\ for\ KVM\ host[uuid\:%s,\ ip\:%s] = connection error for KVM host[uuid:{0}, ip:{1}] @@ -1607,6 +1919,8 @@ failed\ to\ connect\ host[UUID\=%s]\ with\ SSH\ password = failed to connect hos failed\ to\ connect\ host[UUID\=%s]\ with\ private\ key = failed to connect host[UUID={0}] with private key unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:\ %d,\ ]\ to\ do\ DNS\ check,\ please\ check\ if\ username/password\ is\ wrong;\ %s = unable to connect to KVM[ip:{0}, username:{1}, sshPort: {2}, ] to do DNS check, please check if username/password is wrong; {3} failed\ to\ ping\ all\ DNS/IP\ in\ %s;\ please\ check\ /etc/resolv.conf\ to\ make\ sure\ your\ host\ is\ able\ to\ reach\ public\ internet = failed to ping all DNS/IP in {0}; please check /etc/resolv.conf to make sure your host is able to reach public internet +unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:%d]\ to\ check\ the\ management\ node\ connectivity,please\ check\ if\ username/password\ is\ wrong;\ %s = unable to connect to KVM[ip:{0}, username:{1}, sshPort:{2}] to check the management node connectivity,please check if username/password is wrong; {3} +the\ KVM\ host[ip\:%s]\ cannot\ access\ the\ management\ node's\ callback\ url.\ It\ seems\ that\ the\ KVM\ host\ cannot\ reach\ the\ management\ IP[%s].\ %s\ %s = the KVM host[ip:{0}] cannot access the management node''s callback url. It seems that the KVM host cannot reach the management IP[{1}]. 
{2} {3} unable\ to\ check\ whether\ the\ host\ is\ taken\ over = unable to check whether the host is taken over unable\ to\ get\ the\ host\ takeover\ information = unable to get the host takeover information the\ host[ip\:%s]\ has\ been\ taken\ over,\ because\ the\ takeover\ flag[HostUuid\:%s]\ already\ exists\ and\ utime[%d]\ has\ not\ exceeded\ host\ ping\ interval[%d] = the host[ip:{0}] has been taken over, because the takeover flag[HostUuid:{1}] already exists and utime[{2}] has not exceeded host ping interval[{3}] @@ -1637,6 +1951,7 @@ vm[uuid\:%s]\ crashes\ due\ to\ kernel\ error = vm[uuid:{0}] crashes due to kern host[uuid\:\ %s]\ memory\ ecc\ triggered,\ detail\:\ %s = host[uuid: {0}] memory ecc triggered, detail: {1} there\ are\ still\ hosts\ not\ have\ the\ same\ cpu\ model,\ details\:\ %s = there are still hosts not have the same cpu model, details: {0} pci\ bridge\ need\ a\ value\ greater\ than\ 0\ and\ lower\ than\ 32 = pci bridge need a value greater than 0 and lower than 32 +vm\ current\ state[%s],\ modify\ bus\ type\ requires\ the\ vm\ state[%s] = vm current state[{0}], modify bus type requires the vm state[{1}] vm\ do\ not\ support\ having\ both\ SCSI\ and\ Virtio-SCSI\ bus\ type\ volumes\ simultaneously. = vm do not support having both SCSI and Virtio-SCSI bus type volumes simultaneously. 
host[uuid\:%s]\ does\ not\ have\ cpu\ model\ information,\ you\ can\ reconnect\ the\ host\ to\ fix\ it = host[uuid:{0}] does not have cpu model information, you can reconnect the host to fix it failed\ to\ create\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to create bridge[{0}] for l2Network[uuid:{1}, type:{2}] on kvm host[uuid:{3}], because {4} @@ -1647,6 +1962,7 @@ failed\ to\ check\ bridge[%s]\ for\ l2VlanNetwork[uuid\:%s,\ name\:%s]\ on\ kvm\ failed\ to\ delete\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vlan\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to delete bridge[{0}] for l2Network[uuid:{1}, type:{2}, vlan:{3}] on kvm host[uuid:{4}], because {5} failed\ to\ apply\ rules\ of\ security\ group\ rules\ to\ kvm\ host[uuid\:%s],\ because\ %s = failed to apply rules of security group rules to kvm host[uuid:{0}], because {1} failed\ to\ check\ default\ rules\ of\ security\ group\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to check default rules of security group on kvm host[uuid:{0}], because {1} +Failed\ to\ start\ vm,\ because\ can\ not\ disable\ vm.cpu.hypervisor.feature\ with\ vm.cpuMode\ none = Failed to start vm, because can not disable vm.cpu.hypervisor.feature with vm.cpuMode none cannot\ get\ vmUuid\ from\ msg\ %s = cannot get vmUuid from msg {0} unable\ to\ do\ vm\ sync\ on\ host[uuid\:%s,\ ip\:%s]\ because\ %s = unable to do vm sync on host[uuid:{0}, ip:{1}] because {2} The\ vm[%s]\ state\ is\ in\ shutdown\ for\ a\ long\ time,\ check\ whether\ the\ vm\ is\ normal = The vm[{0}] state is in shutdown for a long time, check whether the vm is normal @@ -1666,7 +1982,6 @@ ldapServer[uuid\=%s,\ name\=%s]\ has\ been\ deleted = ldapServer[uuid={0}, name= The\ LDAP\ server[url\=%s,base\=%s]\ already\ exists = The LDAP server[url={0},base={1}] already exists all\ ldap\ account\ importing\ attempt\ is\ failed.\ ldapServerUuid\=%s = all ldap account importing attempt is failed. 
ldapServerUuid={0} all\ ldap\ account\ unbinding\ attempt\ is\ failed.\ ldapServerUuid\=%s = all ldap account unbinding attempt is failed. ldapServerUuid={0} -not\ support = not support query\ ldap\ entry\ fail,\ %s = query ldap entry fail, {0} query\ ldap\ entry[filter\=%s]\ fail,\ because\ %s = query ldap entry[filter={0}] fail, because {1} user[%s]\ is\ not\ exists\ on\ LDAP/AD\ server[address\=%s,\ baseDN\=%s] = user[{0}] is not exists on LDAP/AD server[address={1}, baseDN={2}] @@ -1756,6 +2071,7 @@ could\ not\ add\ backend\ server\ vmnic\ to\ serverGroup[uuid\:%s]\ ,because\ vm L3\ networks[uuids\:%s]\ of\ the\ vm\ nics\ has\ no\ network\ service[%s]\ enabled = L3 networks[uuids:{0}] of the vm nics has no network service[{1}] enabled the\ vm\ nics[uuid\:%s]\ are\ already\ on\ the\ load\ balancer\ servegroup\ [uuid\:%s] = the vm nics[uuid:{0}] are already on the load balancer servegroup [uuid:{1}] could\ not\ add\ backend\ server\ vmnic\ to\ serverGroup\ [uuid\:%s],\ because\ vmnic\ ip\ [ipAddress\:%s]\ is\ repeated = could not add backend server vmnic to serverGroup [uuid:{0}], because vmnic ip [ipAddress:{1}] is repeated +could\ not\ add\ vm\ nic\ [uuid\:%s]\ to\ server\ group\ [uuid\:%s]\ because\ listener\ [uuid\:%s]\ attached\ this\ server\ group\ already\ the\ nic\ to\ be\ added = could not add vm nic [uuid:{0}] to server group [uuid:{1}] because listener [uuid:{2}] attached this server group already the nic to be added could\ not\ add\ backend\ server\ ip\ to\ serverGroup\ [uuid\:%s],\ because\ ip\ [ipAddress\:%s]\ is\ repeated = could not add backend server ip to serverGroup [uuid:{0}], because ip [ipAddress:{1}] is repeated invalid\ \ weight[serverIp\:%s,weight\:%s],\ weight\ is\ not\ in\ the\ range\ [%d,\ %d] = invalid weight[serverIp:{0},weight:{1}], weight is not in the range [{2}, {3}] the\ server\ ips\ [uuid\:%s]\ are\ already\ on\ the\ load\ balancer\ servegroup\ [uuid\:%s] = the server ips [uuid:{0}] are already on the load balancer servegroup 
[uuid:{1}] @@ -1783,6 +2099,7 @@ could\ not\ add\ backend\ server\ ip\ to\ serverGroup\ [uuid\:%s],\ because\ ip\ could\ not\ add\ server\ ip\ to\ share\ load\ balancer\ server\ group = could not add server ip to share load balancer server group could\ not\ change\ backendserver,\ beacause\ vmincs\ and\ serverips\ is\ null = could not change backendserver, beacause vmincs and serverips is null can\ not\ get\ service\ providerType\ for\ load\ balancer\ listener\ [uuid\:%s] = can not get service providerType for load balancer listener [uuid:{0}] +service\ provider\ type\ mismatching.\ The\ load\ balancer[uuid\:%s]\ is\ provided\ by\ the\ service\ provider[type\:%s],\ but\ new\ service\ provider\ is\ [type\:\ %s] = service provider type mismatching. The load balancer[uuid:{0}] is provided by the service provider[type:{1}], but new service provider is [type: {2}] there\ is\ listener\ with\ same\ port\ [%s]\ and\ same\ load\ balancer\ [uuid\:%s] = there is listener with same port [{0}] and same load balancer [uuid:{1}] invalid\ health\ checking\ parameters[%s],\ the\ format\ is\ method\:URI\:code,\ for\ example,\ GET\:/index.html\:http_2xx = invalid health checking parameters[{0}], the format is method:URI:code, for example, GET:/index.html:http_2xx cannot\ find\ the\ load\ balancer[uuid\:%s] = cannot find the load balancer[uuid:{0}] @@ -1807,18 +2124,28 @@ invalid\ balancer\ weight\ for\ nic\:%s,\ %d\ is\ not\ in\ the\ range\ [%d,\ %d] # In Module: localstorage disk\ capacity[%s\ bytes]\ required = disk capacity[{0} bytes] required local\ storage\ volume[uuid\:%s]\ is\ not\ on\ this\ host = local storage volume[uuid:{0}] is not on this host +To\ create\ volume\ on\ the\ local\ primary\ storage,\ you\ must\ specify\ the\ host\ that\ the\ volume\ is\ going\ to\ be\ created\ using\ the\ system\ tag\ [%s] = To create volume on the local primary storage, you must specify the host that the volume is going to be created using the system tag [{0}] +invalid\ uri,\ correct\ 
example\ is\ file\://$URL;hostUuid\://$HOSTUUID\ or\ volume\://$VOLUMEUUID\ or\ volumeSnapshotReuse\://$SNAPSHOTUUID = invalid uri, correct example is file://$URL;hostUuid://$HOSTUUID or volume://$VOLUMEUUID or volumeSnapshotReuse://$SNAPSHOTUUID the\ volume[uuid\:%s]\ is\ not\ on\ any\ local\ primary\ storage = the volume[uuid:{0}] is not on any local primary storage the\ volume[uuid\:%s]\ is\ already\ on\ the\ host[uuid\:%s] = the volume[uuid:{0}] is already on the host[uuid:{1}] the\ primary\ storage[uuid\:%s]\ is\ not\ found = the primary storage[uuid:{0}] is not found the\ primary\ storage[uuid\:%s]\ is\ disabled\ or\ maintenance\ cold\ migrate\ is\ not\ allowed = the primary storage[uuid:{0}] is disabled or maintenance cold migrate is not allowed +the\ dest\ host[uuid\:%s]\ doesn't\ belong\ to\ the\ local\ primary\ storage[uuid\:%s]\ where\ the\ volume[uuid\:%s]\ locates = the dest host[uuid:{0}] doesn''t belong to the local primary storage[uuid:{1}] where the volume[uuid:{2}] locates +the\ dest\ host[uuid\:%s]\ doesn't\ have\ enough\ physical\ capacity\ due\ to\ the\ threshold\ of\ primary\ storage[uuid\:%s]\ is\ %f\ but\ available\ physical\ capacity\ is\ %d = the dest host[uuid:{0}] doesn''t have enough physical capacity due to the threshold of primary storage[uuid:{1}] is {2} but available physical capacity is {3} the\ volume[uuid\:%s]\ is\ not\ in\ status\ of\ Ready,\ cannot\ migrate\ it = the volume[uuid:{0}] is not in status of Ready, cannot migrate it +the\ data\ volume[uuid\:%s,\ name\:\ %s]\ is\ still\ attached\ to\ the\ VM[uuid\:%s].\ Please\ detach\ it\ before\ migration = the data volume[uuid:{0}, name: {1}] is still attached to the VM[uuid:{2}]. Please detach it before migration +the\ volume[uuid\:%s]\ is\ the\ root\ volume\ of\ the\ vm[uuid\:%s].\ Currently\ the\ vm\ is\ in\ state\ of\ %s,\ please\ stop\ it\ before\ migration = the volume[uuid:{0}] is the root volume of the vm[uuid:{1}]. 
Currently the vm is in state of {2}, please stop it before migration +the\ volume[uuid\:%s]\ is\ the\ root\ volume\ of\ the\ vm[uuid\:%s].\ Currently\ the\ vm\ still\ has\ %s\ data\ volumes\ attached,\ please\ detach\ them\ before\ migration = the volume[uuid:{0}] is the root volume of the vm[uuid:{1}]. Currently the vm still has {2} data volumes attached, please detach them before migration +the\ volume[uuid\:%s]\ is\ the\ root\ volume\ of\ the\ vm[uuid\:%s].\ Currently\ the\ vm\ still\ has\ ISO\ attached,\ please\ detach\ it\ before\ migration = the volume[uuid:{0}] is the root volume of the vm[uuid:{1}]. Currently the vm still has ISO attached, please detach it before migration The\ clusterUuid\ of\ vm[uuid\:%s]\ cannot\ be\ null\ when\ migrate\ the\ root\ volume[uuid\:%s,\ name\:\ %s] = The clusterUuid of vm[uuid:{0}] cannot be null when migrate the root volume[uuid:{1}, name: {2}] The\ two\ clusters[uuid\:%s,uuid\:%s]\ cannot\ access\ each\ other\ in\ l2\ network\ \ when\ migrate\ the\ vm[uuid\:%s]\ to\ another\ cluster = The two clusters[uuid:{0},uuid:{1}] cannot access each other in l2 network when migrate the vm[uuid:{2}] to another cluster the\ url[%s]\ is\ not\ an\ absolute\ path\ starting\ with\ '/' = the url[{0}] is not an absolute path starting with ''/'' \ the\ url\ contains\ an\ invalid\ folder[/dev\ or\ /proc\ or\ /sys] = the url contains an invalid folder[/dev or /proc or /sys] The\ clusterUuid\ of\ vm\ cannot\ be\ null\ when\ migrate\ the\ vm = The clusterUuid of vm cannot be null when migrate the vm The\ primary\ storage[uuid\:%s]\ is\ disabled\ cold\ migrate\ is\ not\ allowed = The primary storage[uuid:{0}] is disabled cold migrate is not allowed +volume[uuid\:%s]\ is\ not\ on\ the\ local\ storage\ anymore,it\ may\ have\ been\ deleted = volume[uuid:{0}] is not on the local storage anymore,it may have been deleted local\ primary\ storage[uuid\:%s]\ doesn't\ have\ the\ host[uuid\:%s] = local primary storage[uuid:{0}] doesn''t have the 
host[uuid:{1}] +failed\ to\ download\ image[uuid\:%s]\ to\ all\ hosts\ in\ the\ local\ storage[uuid\:%s].\ %s = failed to download image[uuid:{0}] to all hosts in the local storage[uuid:{1}]. {2} unable\ to\ create\ the\ data\ volume[uuid\:\ %s]\ on\ a\ local\ primary\ storage[uuid\:%s],\ because\ the\ hostUuid\ is\ not\ specified. = unable to create the data volume[uuid: {0}] on a local primary storage[uuid:{1}], because the hostUuid is not specified. No\ Host\ state\ is\ Enabled,\ Please\ check\ the\ availability\ of\ the\ host = No Host state is Enabled, Please check the availability of the host host[uuid\:%s]\ cannot\ access\ local\ storage[uuid\:%s],\ maybe\ it\ is\ detached = host[uuid:{0}] cannot access local storage[uuid:{1}], maybe it is detached @@ -1826,7 +2153,9 @@ resource[uuid\:%s,\ type\:\ %s]\ is\ not\ on\ the\ local\ primary\ storage[uuid\ resource[uuid\:%s,\ type\:\ %s]\ on\ the\ local\ primary\ storage[uuid\:%s]\ maps\ to\ multiple\ hypervisor%s = resource[uuid:{0}, type: {1}] on the local primary storage[uuid:{2}] maps to multiple hypervisor{3} cannot\ attach\ ISO\ to\ a\ primary\ storage[uuid\:%s]\ which\ is\ disabled = cannot attach ISO to a primary storage[uuid:{0}] which is disabled host(s)[uuids\:\ %s]\ volume\ locate\ is\ not\ Connected. = host(s)[uuids: {0}] volume locate is not Connected. 
+volume[uuid\:%s]\ has\ reference\ volume[%s],\ can\ not\ change\ volume\ type\ before\ flatten\ them\ and\ their\ descendants = volume[uuid:{0}] has reference volume[{1}], can not change volume type before flatten them and their descendants There\ is\ no\ LocalStorage\ primary\ storage[state\=%s,status\=%s]\ on\ the\ cluster[%s],\ when\ the\ cluster\ mounts\ multiple\ primary\ storage,\ the\ system\ uses\ the\ local\ primary\ storage\ by\ default.\ Check\ the\ state/status\ of\ primary\ storage\ and\ make\ sure\ they\ have\ been\ attached\ to\ clusters = There is no LocalStorage primary storage[state={0},status={1}] on the cluster[{2}], when the cluster mounts multiple primary storage, the system uses the local primary storage by default. Check the state/status of primary storage and make sure they have been attached to clusters +the\ type\ of\ primary\ storage[uuid\:%s]\ chosen\ is\ not\ local\ storage,\ check\ if\ the\ resource\ can\ be\ created\ on\ other\ storage\ when\ cluster\ has\ attached\ local\ primary\ storage = the type of primary storage[uuid:{0}] chosen is not local storage, check if the resource can be created on other storage when cluster has attached local primary storage The\ cluster\ mounts\ multiple\ primary\ storage[%s(%s),\ other\ non-LocalStorage\ primary\ storage],\ primaryStorageUuidForDataVolume\ cannot\ be\ specified\ %s = The cluster mounts multiple primary storage[{0}({1}), other non-LocalStorage primary storage], primaryStorageUuidForDataVolume cannot be specified {2} The\ cluster[uuid\=%s]\ mounts\ multiple\ primary\ storage[LocalStorage,\ other\ non-LocalStorage\ primary\ storage],\ You\ must\ specify\ the\ primary\ storage\ where\ the\ root\ disk\ is\ located = The cluster[uuid={0}] mounts multiple primary storage[LocalStorage, other non-LocalStorage primary storage], You must specify the primary storage where the root disk is located The\ cluster[uuid\=%s]\ mounts\ multiple\ primary\ storage[LocalStorage,\ other\ non-LocalStorage\ 
primary\ storage],\ You\ must\ specify\ the\ primary\ storage\ where\ the\ data\ disk\ is\ located = The cluster[uuid={0}] mounts multiple primary storage[LocalStorage, other non-LocalStorage primary storage], You must specify the primary storage where the data disk is located @@ -1834,10 +2163,17 @@ no\ LocalStorageBackupStorageMediator\ supporting\ hypervisor[%s]\ and\ backup\ creation\ rely\ on\ image\ cache[uuid\:%s,\ locate\ host\ uuids\:\ [%s]],\ cannot\ create\ other\ places. = creation rely on image cache[uuid:{0}, locate host uuids: [{1}]], cannot create other places. local\ storage\ doesn't\ support\ live\ migration\ for\ hypervisor[%s] = local storage doesn''t support live migration for hypervisor[{0}] Can't\ attach\ volume\ to\ VM,\ no\ qualified\ cluster = Can''t attach volume to VM, no qualified cluster +cannot\ attach\ the\ data\ volume[uuid\:%s]\ to\ the\ vm[uuid\:%s].\ Both\ vm's\ root\ volume\ and\ the\ data\ volume\ are\ on\ local\ primary\ storage,\ but\ they\ are\ on\ different\ hosts.\ The\ root\ volume[uuid\:%s]\ is\ on\ the\ host[uuid\:%s]\ but\ the\ data\ volume[uuid\:\ %s]\ is\ on\ the\ host[uuid\:\ %s] = cannot attach the data volume[uuid:{0}] to the vm[uuid:{1}]. Both vm''s root volume and the data volume are on local primary storage, but they are on different hosts. The root volume[uuid:{2}] is on the host[uuid:{3}] but the data volume[uuid: {4}] is on the host[uuid: {5}] +the\ data\ volume[name\:%s,\ uuid\:%s]\ is\ on\ the\ local\ storage[uuid\:%s];\ however,the\ host\ on\ which\ the\ data\ volume\ is\ has\ been\ deleted.\ Unable\ to\ recover\ this\ volume = the data volume[name:{0}, uuid:{1}] is on the local storage[uuid:{2}]; however,the host on which the data volume is has been deleted. 
Unable to recover this volume +unable\ to\ recover\ the\ vm[uuid\:%s,\ name\:%s].\ The\ vm's\ root\ volume\ is\ on\ the\ local\ storage[uuid\:%s];\ however,\ the\ host\ on\ which\ the\ root\ volume\ is\ has\ been\ deleted = unable to recover the vm[uuid:{0}, name:{1}]. The vm''s root volume is on the local storage[uuid:{2}]; however, the host on which the root volume is has been deleted +unable\ to\ live\ migrate\ vm[uuid\:%s]\ with\ data\ volumes\ on\ local\ storage.\ Need\ detach\ all\ data\ volumes\ first. = unable to live migrate vm[uuid:{0}] with data volumes on local storage. Need detach all data volumes first. +unable\ to\ live\ migrate\ vm[uuid\:%s]\ with\ local\ storage.\ Only\ linux\ guest\ is\ supported.\ Current\ platform\ is\ [%s] = unable to live migrate vm[uuid:{0}] with local storage. Only linux guest is supported. Current platform is [{1}] +unable\ to\ live\ migrate\ vm[uuid\:%s]\ with\ ISO\ on\ local\ storage.\ Need\ detach\ all\ ISO\ first. = unable to live migrate vm[uuid:{0}] with ISO on local storage. Need detach all ISO first. 
+To\ create\ data\ volume\ on\ the\ local\ primary\ storage,\ you\ must\ specify\ the\ host\ that\ the\ data\ volume\ is\ going\ to\ be\ created\ using\ the\ system\ tag\ [%s] = To create data volume on the local primary storage, you must specify the host that the data volume is going to be created using the system tag [{0}] the\ host[uuid\:%s]\ doesn't\ belong\ to\ the\ local\ primary\ storage[uuid\:%s] = the host[uuid:{0}] doesn''t belong to the local primary storage[uuid:{1}] the\ local\ primary\ storage[uuid\:%s]\ has\ no\ hosts\ with\ enough\ disk\ capacity[%s\ bytes]\ required\ by\ the\ disk\ offering[uuid\:%s] = the local primary storage[uuid:{0}] has no hosts with enough disk capacity[{1} bytes] required by the disk offering[uuid:{2}] +the\ image[uuid\:%s,\ name\:\ %s]\ is\ not\ available\ to\ download\ on\ any\ backup\ storage\:\\n1.\ check\ if\ image\ is\ in\ status\ of\ Deleted\\n2.\ check\ if\ the\ backup\ storage\ on\ which\ the\ image\ is\ shown\ as\ Ready\ is\ attached\ to\ the\ zone[uuid\:%s] = the image[uuid:{0}, name: {1}] is not available to download on any backup storage:\\n1. check if image is in status of Deleted\\n2. 
check if the backup storage on which the image is shown as Ready is attached to the zone[uuid:{2}] root\ image\ has\ been\ deleted,\ cannot\ reimage\ now = root image has been deleted, cannot reimage now -the\ volume[uuid;%s]\ is\ attached\ to\ a\ VM[uuid\:%s]\ which\ is\ in\ state\ of\ %s,\ cannot\ do\ the\ snapshot\ merge = the volume[uuid;{0}] is attached to a VM[uuid:{1}] which is in state of {2}, cannot do the snapshot merge why\ volume[uuid\:%s,\ installPath\:%s]\ not\ in\ directory\ %s = why volume[uuid:{0}, installPath:{1}] not in directory {2} cannot\ find\ flag\ file\ [%s]\ on\ host\ [%s],\ because\:\ %s = cannot find flag file [{0}] on host [{1}], because: {2} cannot\ create\ flag\ file\ [%s]\ on\ host\ [%s],\ because\:\ %s = cannot create flag file [{0}] on host [{1}], because: {2} @@ -1846,9 +2182,13 @@ unable\ to\ create\ empty\ snapshot\ volume[name\:%s,\ installpath\:\ %s]\ on\ k unable\ to\ create\ an\ empty\ volume[uuid\:%s,\ name\:%s]\ on\ the\ kvm\ host[uuid\:%s] = unable to create an empty volume[uuid:{0}, name:{1}] on the kvm host[uuid:{2}] failed\ to\ download\ bits\ from\ the\ SFTP\ backup\ storage[hostname\:%s,\ path\:\ %s]\ to\ the\ local\ primary\ storage[uuid\:%s,\ path\:\ %s],\ %s = failed to download bits from the SFTP backup storage[hostname:{0}, path: {1}] to the local primary storage[uuid:{2}, path: {3}], {4} failed\ to\ upload\ bits\ from\ the\ local\ storage[uuid\:%s,\ path\:%s]\ to\ the\ SFTP\ backup\ storage[hostname\:%s,\ path\:%s],\ %s = failed to upload bits from the local storage[uuid:{0}, path:{1}] to the SFTP backup storage[hostname:{2}, path:{3}], {4} +the\ required\ host[uuid\:%s]\ cannot\ satisfy\ conditions[state\:\ %s,\ status\:\ %s,\ size\ >\ %s\ bytes],\ or\ doesn't\ belong\ to\ a\ local\ primary\ storage\ satisfying\ conditions[state\:\ %s,\ status\:\ %s],\ or\ its\ cluster\ doesn't\ attach\ to\ any\ local\ primary\ storage = the required host[uuid:{0}] cannot satisfy conditions[state: {1}, status: {2}, size > {3} 
bytes], or doesn''t belong to a local primary storage satisfying conditions[state: {4}, status: {5}], or its cluster doesn''t attach to any local primary storage +no\ local\ primary\ storage\ in\ zone[uuid\:%s]\ can\ satisfy\ conditions[state\:\ %s,\ status\:\ %s]\ or\ contain\ hosts\ satisfying\ conditions[state\:\ %s,\ status\:\ %s,\ size\ >\ %s\ bytes] = no local primary storage in zone[uuid:{0}] can satisfy conditions[state: {1}, status: {2}] or contain hosts satisfying conditions[state: {3}, status: {4}, size > {5} bytes] +no\ local\ primary\ storage\ can\ satisfy\ conditions[state\:\ %s,\ status\:\ %s]\ or\ contain\ hosts\ satisfying\ conditions[state\:\ %s,\ status\:\ %s,\ size\ >\ %s\ bytes] = no local primary storage can satisfy conditions[state: {0}, status: {1}] or contain hosts satisfying conditions[state: {2}, status: {3}, size > {4} bytes] {the\ physical\ capacity\ usage\ of\ the\ host[uuid\:%s]\ has\ exceeded\ the\ threshold[%s]} = '{the physical capacity usage of the host[uuid:{0}'] has exceeded the threshold[{1}]} failed\ allocate\ localstorage = failed allocate localstorage cannot\ reserve\ enough\ space\ for\ primary\ storage[uuid\:\ %s]\ on\ host[uuid\:\ %s],\ not\ enough\ physical\ capacity = cannot reserve enough space for primary storage[uuid: {0}] on host[uuid: {1}], not enough physical capacity +host[uuid\:\ %s]\ of\ local\ primary\ storage[uuid\:\ %s]\ doesn't\ have\ enough\ capacity[current\:\ %s\ bytes,\ needed\:\ %s] = host[uuid: {0}] of local primary storage[uuid: {1}] doesn''t have enough capacity[current: {2} bytes, needed: {3}] cannot\ find\ any\ host\ which\ has\ resource[uuid\:%s] = cannot find any host which has resource[uuid:{0}] Resource[uuid\:%s]\ can\ only\ be\ operated\ on\ host[uuid\:%s],\ but\ the\ host\ has\ been\ deleted = Resource[uuid:{0}] can only be operated on host[uuid:{1}], but the host has been deleted @@ -1928,7 +2268,12 @@ the\ vip[uuid\:%s]\ already\ has\ bound\ to\ other\ service[%s] = the vip[uuid:{ Current\ 
port\ range[%s,\ %s]\ is\ conflicted\ with\ system\ service\ port\ range\ [%s,\ %s]\ with\ vip[uuid\:\ %s]\ protocol\:\ %s\ = Current port range[{0}, {1}] is conflicted with system service port range [{2}, {3}] with vip[uuid: {4}] protocol: {5} Current\ port\ range[%s,\ %s]\ is\ conflicted\ with\ used\ port\ range\ [%s,\ %s]\ with\ vip[uuid\:\ %s]\ protocol\:\ %s\ = Current port range[{0}, {1}] is conflicted with used port range [{2}, {3}] with vip[uuid: {4}] protocol: {5} +# In Module: memory-balloon +no\ data\ of\ %s\ found\ on\ host[%s] = no data of {0} found on host[{1}] + # In Module: mevoco +More\ than\ one\ BackupStorage\ on\ the\ same\ host\ identified\ by\ hostname.\ There\ has\ been\ a\ SftpBackupStorage\ [hostname\:%s]\ existing.\ The\ BackupStorage\ type\ to\ be\ added\ is\ %s.\ = More than one BackupStorage on the same host identified by hostname. There has been a SftpBackupStorage [hostname:{0}] existing. The BackupStorage type to be added is {1}. +More\ than\ one\ BackupStorage\ on\ the\ same\ host\ identified\ by\ hostname.\ There\ has\ been\ an\ ImageStoreBackupStorage\ [hostname\:%s]\ existing.\ The\ BackupStorage\ type\ to\ be\ added\ is\ %s.\ = More than one BackupStorage on the same host identified by hostname. There has been an ImageStoreBackupStorage [hostname:{0}] existing. The BackupStorage type to be added is {1}. VM\ [uuid\:\ %s]\ has\ already\ been\ added\ to\ affinityGroup\ [uuid\:\ %s] = VM [uuid: {0}] has already been added to affinityGroup [uuid: {1}] There\ are\ other\ VMs\ on\ this\ host\ [uuid\:\ %s]\ belonging\ to\ same\ affinityGroup\ [%s] = There are other VMs on this host [uuid: {0}] belonging to same affinityGroup [{1}] affinityGroup\ [uuid\:%s]\ reserve\ host\ [uuid\:%s]\ for\ vm\ [uuid\:\ %s]\ failed = affinityGroup [uuid:{0}] reserve host [uuid:{1}] for vm [uuid: {2}] failed @@ -1949,14 +2294,18 @@ can\ not\ detach\ interfaces\ repeatedly\ in\ a\ bond[%s]. 
= can not detach inte unable\ to\ find\ bonding[uuid\=%s] = unable to find bonding[uuid={0}] cannot\ delete\ bonding\ corresponding\ to\ the\ management\ network = cannot delete bonding corresponding to the management network cannot\ delete\ bonding\ configured\ with\ vtep\ ip = cannot delete bonding configured with vtep ip +cannot\ assign\ xmit_hash_policy\ [%s]\ for\ mode\ [%s],\ because\ only\ mode\ 802.3ad\ support\ specifying\ different\ xmit_hash_policys = cannot assign xmit_hash_policy [{0}] for mode [{1}], because only mode 802.3ad support specifying different xmit_hash_policys xmit_hash_policy\ for\ mode\ [%s]\ should\ not\ be\ null = xmit_hash_policy for mode [{0}] should not be null interface\ in\ slaveNames[%s]\ does\ not\ exist\ on\ the\ hosts = interface in slaveNames[{0}] does not exist on the hosts there\ is\ no\ interface[%s]\ on\ host[uuid\:%s] = there is no interface[{0}] on host[uuid:{1}] there\ is\ no\ slave\ interface\ on\ the\ host[uuid\:%s] = there is no slave interface on the host[uuid:{0}] can\ not\ have\ interfaces\ in\ a\ bond\ which\ is\ not\ on\ the\ same\ host[%s]. = can not have interfaces in a bond which is not on the same host[{0}]. cannot\ bind\ with\ interface\ corresponding\ to\ the\ management\ network. = cannot bind with interface corresponding to the management network. 
+bonding\ card\ can\ not\ have\ occupied\ interfaces,\ which\ was\ already\ been\ used\ by\ bonding[uuid\:%s] = bonding card can not have occupied interfaces, which was already been used by bonding[uuid:{0}] +bonding\ card\ can\ not\ have\ interfaces\ that\ has\ been\ used\ as\ a\ network\ bridge,\ which\ was\ already\ been\ used\ by\ host[%s] = bonding card can not have interfaces that has been used as a network bridge, which was already been used by host[{0}] bonding\ card\ can\ not\ have\ interfaces\ that\ has\ been\ pass-through = bonding card can not have interfaces that has been pass-through bonding\ card\ can\ not\ have\ interfaces\ with\ different\ speed,\ which\ is\ on\ the\ host[%s] = bonding card can not have interfaces with different speed, which is on the host[{0}] +bonding\ card\ can\ not\ have\ [%s]\ interfaces,it\ must\ be\ the\ number\ between[1~8] = bonding card can not have [{0}] interfaces,it must be the number between[1~8] [%s]\ bonding\ card\ can\ not\ have\ [%s]\ interfaces,\ it\ must\ be\ the\ number\ between[1~8] = [{0}] bonding card can not have [{1}] interfaces, it must be the number between[1~8] [%s]\ bonding\ can\ not\ have\ [%s]\ interfaces,\ it\ must\ be\ the\ number\ between[1~2] = [{0}] bonding can not have [{1}] interfaces, it must be the number between[1~2] failed\ to\ add\ linux\ bonding\ to\ host[uuid\:%s]\ \:\ %s = failed to add linux bonding to host[uuid:{0}] : {1} @@ -1989,7 +2338,9 @@ can\ not\ set\ management\ network\ on\ bonding,\ because\ management\ is\ the\ no\ available\ network\ interface\ on\ the\ host\ to\ start\ the\ vm = no available network interface on the host to start the vm vm\ security\ level\ not\ consistent\ with\ vms\ running\ on\ host = vm security level not consistent with vms running on host fail\ to\ update\ iscsi\ initiator\ name\ of\ host[uuid\:%s] = fail to update iscsi initiator name of host[uuid:{0}] +failed\ to\ allocate\ pci\ device\ on\ host[uuid\:%s],\ because\ there\ are\ not\ enough\ pci\ 
devices\ available = failed to allocate pci device on host[uuid:{0}], because there are not enough pci devices available networkInterface[name\:%s]\ of\ host[uuid\:%s]\ can\ not\ find = networkInterface[name:{0}] of host[uuid:{1}] can not find +only\ support\ do\ live\ snapshot\ on\ vm\ state[%s],\ but\ vm\ is\ on\ [%s]\ state = only support do live snapshot on vm state[{0}], but vm is on [{1}] state primary\ storage\ type\ doesn't\ support\ sync\ qos\ from\ host = primary storage type doesn''t support sync qos from host primary\ storage\ type\ doesn't\ support\ set\ qos = primary storage type doesn''t support set qos host[uuid\:%s]\ becomes\ power\ off,\ send\ notify = host[uuid:{0}] becomes power off, send notify @@ -2033,6 +2384,8 @@ failed\ to\ post-handle\ vf\ nic\ after\ migrate\ vm[uuid\:%s]\ to\ host[uuid\:% failed\ to\ delete\ vHost\ User\ Client\ in\ host[uuid\:%s]\ for\ vm[uuid\:%s]\ \:\ %s = failed to delete vHost User Client in host[uuid:{0}] for vm[uuid:{1}] : {2} failed\ to\ generate\ vHost\ User\ Client\ in\ host[uuid\:%s]\ for\ vm[uuid\:%s]\ \:\ %s = failed to generate vHost User Client in host[uuid:{0}] for vm[uuid:{1}] : {2} cannot\ generate\ vhost\ user\ client\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] = cannot generate vhost user client for vm[uuid:{0}] on the destination host[uuid:{1}] +could\ not\ ungenerate\ pci\ device[uuid\:%s],\ becausethere\ are\ another\ l2[uuid\:%s]\ use\ the\ physical\ network\ interface\ attached\ to\ cluster = could not ungenerate pci device[uuid:{0}], because there are another l2[uuid:{1}] use the physical network interface attached to cluster +could\ not\ generate\ pci\ device[uuid\:%s],\ becausethere\ are\ another\ l2[uuid\:%s]\ use\ the\ physical\ network\ interface\ attached\ to\ cluster = could not generate pci device[uuid:{0}], because there are another l2[uuid:{1}] use the physical network interface attached to cluster only\ %s\ support\ vdpa = only {0} support vdpa cluster[uuid\:%s]\ do\ not\ 
support\ ovs-dpdk = cluster[uuid:{0}] do not support ovs-dpdk l2\ network[uuid\:%s]\ in\ host[uuid\:%s]\ is\ not\ sr-iov\ virtualized = l2 network[uuid:{0}] in host[uuid:{1}] is not sr-iov virtualized @@ -2042,6 +2395,7 @@ cannot\ generate\ vdpa\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] release\ vdpa\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] = release vdpa for vm[uuid:{0}] on the destination host[uuid:{1}] restore\ vdpa\ for\ vm[uuid\:%s]\ from\ the\ destination\ host[uuid\:%s] = restore vdpa for vm[uuid:{0}] from the destination host[uuid:{1}] not\ dest\ host\ found\ in\ db,\ can't\ send\ change\ password\ cmd\ to\ the\ host! = not dest host found in db, can''t send change password cmd to the host! +not\ account\ preference\ found,\ \ send\ change\ password\ cmd\ to\ the\ host! = not account preference found, send change password cmd to the host! fail\ to\ attach\ virtio\ driver\ because\ read\ md5\ of\ file[%s]\ fail\ in\ mn[uuid\:%s]\:\ file\ not\ found\ on\ classpath = fail to attach virtio driver because read md5 of file[{0}] fail in mn[uuid:{1}]: file not found on classpath fail\ to\ attach\ virtio\ driver\ because\ of\ invalid\ md5\ of\ file[%s]\ in\ mn[uuid\:%s] = fail to attach virtio driver because of invalid md5 of file[{0}] in mn[uuid:{1}] fail\ to\ attach\ virtio\ driver\ because\ read\ md5\ of\ file[%s]\ fail\ in\ mn[uuid\:%s]\:\ %s = fail to attach virtio driver because read md5 of file[{0}] fail in mn[uuid:{1}]: {2} @@ -2051,16 +2405,23 @@ state\ of\ vm[uuid\:%s]\ is\ not\ in\ Running\ state,\ can\ not\ sync\ clock = s hot\ plug\ is\ not\ turned\ off,can\ not\ open\ vm\ numa = hot plug is not turned off,can not open vm numa vm[uuid\:\ %s]'s\ state\ is\ not\ Stopped\ now,\ cannot\ operate\ 'changevmimage'\ action = vm[uuid: {0}]''s state is not Stopped now, cannot operate ''changevmimage'' action vm[uuid\:%s]\ cluster\ uuid\ is\ null,\ cannot\ change\ image\ for\ it = vm[uuid:{0}] cluster uuid is null, cannot change 
image for it +vm[uuid\:%s]\ is\ in\ cluster[uuid\:%s],\ but\ there\ is\ no\ available\ host\ in\ the\ cluster,\ cannot\ change\ image\ for\ the\ vm = vm[uuid:{0}] is in cluster[uuid:{1}], but there is no available host in the cluster, cannot change image for the vm +unable\ to\ allocate\ hosts,\ no\ host\ meets\ the\ following\ conditions\:\ clusterUuid\=%s\ hostUuid\=%s\ cpu\=%d\ memoryCapacity\=%d\ L3NetworkUuids\=%s = unable to allocate hosts, no host meets the following conditions: clusterUuid={0} hostUuid={1} cpu={2} memoryCapacity={3} L3NetworkUuids={4} +can\ not\ find\ backup\ storage,\ unable\ to\ commit\ volume\ snapshot[psUuid\:%s]\ as\ image,\ destination\ required\ PS\ uuid\:%s = can not find backup storage, unable to commit volume snapshot[psUuid:{0}] as image, destination required PS uuid:{1} direction\ must\ be\ set\ to\ in\ or\ out = direction must be set to in or out inboundBandwidth\ must\ be\ set\ no\ more\ than\ %s. = inboundBandwidth must be set no more than {0}. outboundBandwidth\ must\ be\ set\ no\ more\ than\ %s. = outboundBandwidth must be set no more than {0}. vm\ [%s]'\ state\ must\ be\ Running\ or\ Paused\ to\ sync\ nic\ qos = vm [{0}]'' state must be Running or Paused to sync nic qos vm\ [%s]'s\ HostUuid\ is\ null,\ cannot\ sync\ nic\ qos = vm [{0}]''s HostUuid is null, cannot sync nic qos +not\ dest\ host\ found\ in\ db\ by\ uuid\:\ %s,\ can't\ send\ change\ password\ cmd\ to\ the\ host! = not dest host found in db by uuid: {0}, can''t send change password cmd to the host! state\ is\ not\ correct\ while\ change\ password. = state is not correct while change password. 
templated\ vm[uuid\:\ %s]\ cannot\ be\ create\ from\ vm\ with\ shareable\ volume[uuids\:\ %s] = templated vm[uuid: {0}] cannot be create from vm with shareable volume[uuids: {1}] +failed\ to\ convert\ vm\ to\ templated\ vm,\ because\ the\ vm\ has\ scheduled\ jobs\ [%s] = failed to convert vm to templated vm, because the vm has scheduled jobs [{0}] The\ number\ of\ data\ volumes\ exceeds\ the\ limit[num\:\ %s],\ please\ reduce\ the\ number\ of\ data\ volumes\ during\ vm\ creation. = The number of data volumes exceeds the limit[num: {0}], please reduce the number of data volumes during vm creation. Can\ not\ set\ security\ level\ to\ not\ %s\ vm\ [uuid\:%s] = Can not set security level to not {0} vm [uuid:{1}] +can\ not\ set\ primaryStorageUuidForRootVolume\ or\ primaryStorageUuidForDataVolume\ or\ rootVolumeSystemTags\ or\ dataVolumeSystemTags\ when\ diskAOs\ is\ not\ empty = can not set primaryStorageUuidForRootVolume or primaryStorageUuidForDataVolume or rootVolumeSystemTags or dataVolumeSystemTags when diskAOs is not empty The\ operation\ only\ allows\ on\ user\ vm = The operation only allows on user vm +there\ are\ not\ enough\ capacity\ for\ full\ vm\ clone\ to\ vm[uuid\:\ %s],\ volumes[uuid\:\ %s]\ on\ primary\ storage[uuid\:\ %s]\ required\:\ %s\ bytes,\ current\ available\ capacity\ is\ %s\ bytes = there are not enough capacity for full vm clone to vm[uuid: {0}], volumes[uuid: {1}] on primary storage[uuid: {2}] required: {3} bytes, current available capacity is {4} bytes The\ nic\ [%s%s]\ is\ not\ mounted\ on\ the\ VM = The nic [{0}{1}] is not mounted on the VM The\ operation\ only\ allows\ on\ user\ vm\ = The operation only allows on user vm The\ operation\ only\ allows\ when\ vm\ [%s]\ state\ is\ stopped\ = The operation only allows when vm [{0}] state is stopped @@ -2078,6 +2439,7 @@ the\ nic\ can't\ apply\ Qos\ with\ the\ port\ mirror\ service\ at\ same\ time. = nic\ id\:\ %s\ does\ not\ exist... = nic id: {0} does not exist... 
The\ 'uuids'\ parameter\ must\ belong\ to\ the\ VmInstanceVO\ or\ HostVO = The ''uuids'' parameter must belong to the VmInstanceVO or HostVO resource[uuids\:%s]\ is\ not\ owned\ by\ account[uuid\:%s] = resource[uuids:{0}] is not owned by account[uuid:{1}] +the\ cache\ of\ a\ templated\ vmInstance[uuid\:%s]\ can\ contain\ only\ one\ or\ zero\ snapshot\ groups.\ the\ current\ number\ of\ snapshot\ groups\ is\ %d. = the cache of a templated vmInstance[uuid:{0}] can contain only one or zero snapshot groups. the current number of snapshot groups is {1}. the\ templated\ vmInstance[uuid\:%s]\ is\ not\ exist = the templated vmInstance[uuid:{0}] is not exist password\ length\ must\ be\ [%s-%s] = password length must be [{0}-{1}] password\ does\ not\ match\ numbers,\ uppercase\ and\ lowercase,\ and\ special\ character\ combinations = password does not match numbers, uppercase and lowercase, and special character combinations @@ -2093,14 +2455,33 @@ invalid\ cpu\ set\ [%s] = invalid cpu set [{0}] the\ host[uuid\:%s]\ already\ attached\ to\ host\ scheduling\ group[uuid\:%s] = the host[uuid:{0}] already attached to host scheduling group[uuid:{1}] host\ clusterUuid\ is\ null = host clusterUuid is null hosts\ that\ you\ can\ add\ to\ a\ host\ scheduling\ group\ must\ be\ enabled\ and\ connected\ to\ the\ MN. = hosts that you can add to a host scheduling group must be enabled and connected to the MN. 
+unmatched\ zone\ detected,\ host[uuid\:\ %s,\ zone\ uuid\:\ %s]'s\ zone\ is\ different\ from\ host\ sheduling\ rule\ group[uuid\:\ %s,\ zone\ uuid\:\ %s] = unmatched zone detected, host[uuid: {0}, zone uuid: {1}]''s zone is different from host scheduling rule group[uuid: {2}, zone uuid: {3}] vm[uuid\:%s]\ already\ attached\ to\ vm\ scheduling\ group[uuid\:%s] = vm[uuid:{0}] already attached to vm scheduling group[uuid:{1}] +unmatched\ zone\ detected,\ vm[uuid\:\ %s,\ zone\ uuid\:\ %s]'s\ zone\ is\ different\ from\ vm\ sheduling\ rule\ group[uuid\:\ %s,\ zone\ uuid\:\ %s] = unmatched zone detected, vm[uuid: {0}, zone uuid: {1}]''s zone is different from vm scheduling rule group[uuid: {2}, zone uuid: {3}] vm\ can\ change\ its\ vm\ scheduling\ group\ only\ in\ state\ [%s,%s],\ but\ vm\ is\ in\ state\ [%s] = vm can change its vm scheduling group only in state [{0},{1}], but vm is in state [{2}] cannot\ operate\ vpc\ vm\ scheduling\ group = cannot operate vpc vm scheduling group zoneUuid\ is\ not\ null = zoneUuid is not null the\ vm\ scheduling\ group\ has\ already\ had\ a\ vms\ Affinitive\ to\ Hosts\ scheduling\ policy\ attached = the vm scheduling group has already had a vms Affinitive to Hosts scheduling policy attached, you cannot attach a vm antiaffinity from each other scheduling rule to the group again. +the\ vm\ scheduling\ group\ has\ already\ had\ a\ vms\ antiaffinity\ from\ hosts\ scheduling\ rule\ attached.\ the\ number\ of\ hosts\ available\ for\ the\ vm\ in\ the\ scheduling\ group\ to\ run\ is\ less\ than\ that\ of\ the\ vm\ in\ the\ group.\ you\ cannot\ attach\ a\ vm\ antiaffinity\ from\ Each\ Other\ scheduling\ rule\ to\ the\ group = the vm scheduling group has already had a vms antiaffinity from hosts scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. 
you cannot attach a vm antiaffinity from Each Other scheduling rule to the group +the\ vm\ scheduling\ group\ has\ already\ had\ a\ vm\ antiaffinity\ from\ each\ other\ scheduling\ rule\ attached.\ the\ number\ of\ hosts\ available\ for\ the\ vm\ in\ the\ scheduling\ group\ to\ run\ is\ less\ than\ that\ of\ the\ vm\ in\ the\ group.\ you\ cannot\ attach\ a\ vms\ antiaffinity\ from\ Hosts\ scheduling\ policy\ to\ the\ group. = the vm scheduling group has already had a vm antiaffinity from each other scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. you cannot attach a vms antiaffinity from Hosts scheduling policy to the group. +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ antiaffinity\ from\ each\ other\ scheduling\ rule\ attached.\ attaching\ another\ one\ is\ not\ allowed. = the vm scheduling group[uuid:{0}] has already had a vm antiaffinity from each other scheduling rule attached. attaching another one is not allowed. +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ affinitive\ to\ each\ other\ scheduling\ rule\ attached.\ Attaching\ a\ vm\ antiaffinity\ from\ each\ other\ scheduling\ rule\ is\ not\ allowed. = the vm scheduling group[uuid:{0}] has already had a vm affinitive to each other scheduling rule attached. Attaching a vm antiaffinity from each other scheduling rule is not allowed. +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ affinitive\ to\ each\ other\ scheduling\ rule\ attached.attaching\ another\ one\ is\ not\ allowed. = the vm scheduling group[uuid:{0}] has already had a vm affinitive to each other scheduling rule attached.attaching another one is not allowed. +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ exclusive\ from\ each\ other\ scheduling\ rule\ attached.\ Attaching\ a\ vm\ affinitive\ to\ each\ other\ scheduling\ policy\ is\ not\ allowed. 
= the vm scheduling group[uuid:{0}] has already had a vm exclusive from each other scheduling rule attached. Attaching a vm affinitive to each other scheduling policy is not allowed. +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ executed\ exclusive\ vm\ or\ affinitive\ vm\ scheduling\ policy\ attached.\ you\ cannot\ attach\ either\ of\ the\ two\ scheduling\ policies\ that\ require\ execution\ to\ the\ group\ again = the vm scheduling group[uuid:{0}] has already had a executed exclusive vm or affinitive vm scheduling policy attached. you cannot attach either of the two scheduling policies that require execution to the group again +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vms\ affinitive\ to\ hosts\ scheduling\ rule\ attached.\ you\ cannot\ attach\ another\ one\ to\ the\ group\ again. = the vm scheduling group[uuid:{0}] has already had a vms affinitive to hosts scheduling rule attached. you cannot attach another one to the group again. +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ antiaffinity\ from\ host\ scheduling\ rule\ attached.\ you\ cannot\ attach\ a\ vms\ affinitive\ to\ host\ scheduling\ rule\ to\ the\ group. = the vm scheduling group[uuid:{0}] has already had a vm antiaffinity from host scheduling rule attached. you cannot attach a vms affinitive to host scheduling rule to the group. +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ affinitive\ to\ hosts\ scheduling\ rule\ attached.\ you\ cannot\ attach\ a\ vm\ antiaffinity\ from\ hosts\ scheduling\ rule\ to\ the\ group. = the vm scheduling group[uuid:{0}] has already had a vm affinitive to hosts scheduling rule attached. you cannot attach a vm antiaffinity from hosts scheduling rule to the group. 
+the\ vm\ scheduling\ group\ has\ already\ had\ a\ vm\ antiaffinity\ from\ each\ other\ scheduling\ rule\ attached.\ the\ number\ of\ hosts\ available\ for\ the\ vm\ in\ the\ scheduling\ group\ to\ run\ is\ less\ than\ that\ of\ the\ vm\ in\ the\ group.\ you\ cannot\ attach\ a\ vms\ affinitive\ to\ hosts\ scheduling\ policy\ to\ the\ group. = the vm scheduling group has already had a vm antiaffinity from each other scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. you cannot attach a vms affinitive to hosts scheduling policy to the group. can\ not\ satisfied\ vm\ scheduling\ rule\ group\ conditions = can not satisfied vm scheduling rule group conditions vm\ scheduling\ group[uuid\:%s]\ reserve\ host\ [uuid\:%s]\ for\ vm\ [uuid\:\ %s]\ failed = vm scheduling group[uuid:{0}] reserve host [uuid:{1}] for vm [uuid: {2}] failed +vm[uuid\:%s]\ is\ now\ running\ on\ host[uuid\:%s],which\ does\ not\ comply\ with\ the\ scheduling\ rule\ associated\ with\ vm\ scheduling\ group[uuid\:%s]. = vm[uuid:{0}] is now running on host[uuid:{1}],which does not comply with the scheduling rule associated with vm scheduling group[uuid:{2}]. hostGroup[uuid\:%s]\ is\ no\ host = hostGroup[uuid:{0}] is no host +vm[uuid\:%s]\ is\ now\ running\ on\ host[uuid\:%s],\ which\ does\ not\ comply\ with\ the\ scheduling\ rule[%s]\ associated\ with\ vm\ scheduling\ group[uuid\:%s]. = vm[uuid:{0}] is now running on host[uuid:{1}], which does not comply with the scheduling rule[{2}] associated with vm scheduling group[uuid:{3}]. +vm[uuid\:%s]\ is\ now\ running\ on\ host[uuid\:%s],which\ does\ not\ comply\ with\ the\ scheduling\ rule[%s]\ associated\ with\ vm\ scheduling\ group[uuid\:%s]. = vm[uuid:{0}] is now running on host[uuid:{1}],which does not comply with the scheduling rule[{2}] associated with vm scheduling group[uuid:{3}]. 
+cannot\ find\ the\ host\ scheduling\ group[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find the host scheduling group[uuid:{0}], it may have been deleted +cannot\ find\ the\ vm\ scheduling\ rule[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find the vm scheduling rule[uuid:{0}], it may have been deleted +cannot\ find\ the\ vm\ scheduling\ group[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find the vm scheduling group[uuid:{0}], it may have been deleted failed\ to\ parse\ API\ message\:\ can\ not\ parse\ encryption\ param\ with\ type\ %s = failed to parse API message: can not parse encryption param with type {0} failed\ to\ parse\ API\ message\:\ cipher\ text\ can\ not\ be\ parsed,\ type\=%s = failed to parse API message: cipher text can not be parsed, type={0} failed\ to\ parse\ API\ message\:\ found\ %d\ encryption\ param\ system\ tags,\ expect\ 1 = failed to parse API message: found {0} encryption param system tags, expect 1 @@ -2141,6 +2522,7 @@ enter\ the\ new\ value\ here,\ empty\ means\ no\ change. = enter the new value h some\ error\ happened,\ skip\ management\ node\ power\ off = some error happened, skip management node power off Failed\ to\ set\ security\ level,\ because\ security\ level\ is\ disabled. = Failed to set security level, because security level is disabled. Unknown\ security\ level\ code[%s],\ supported\ values\ are\ %s = Unknown security level code[{0}], supported values are {1} +Parse\ license\ error,\\n1.\ check\ your\ private\ key\ and\ application\ code\ is\ correct\\n2.\ check\ your\ license\ is\ not\ corrupted\\n3.\ use\ zstack-ctl\ clear_license\ to\ clear\ your\ licenses\ and\ try\ to\ reinstall\\n = Parse license error,\\n1. check your private key and application code is correct\\n2. check your license is not corrupted\\n3. 
use zstack-ctl clear_license to clear your licenses and try to reinstall\\n the\ licenseRequestCode\ is\ illegal = the licenseRequestCode is illegal Unexpected\ decoded\ license\ file\ length\:\ %d = Unexpected decoded license file length: {0} Decode\ fail\ because\ %s = Decode fail because {0} @@ -2167,6 +2549,8 @@ issue\ date\ of\ platform\ license\ is\ earlier\ than\ the\ existing\ license\ i add-on\ license\ is\ not\ support\ when\ license\ is\ Community\ /\ Trial = add-on license is not support when license is Community / Trial failed\ to\ update\ license = failed to update license not\ supported\:\ delete\ license[%s]\ from\ USB-key = not supported: delete license[{0}] from USB-key +Hybrid\ platform\ license\ is\ already\ in\ use.\ You\ should\ remove\ Hybrid\ platform\ license\ and\ hybird\ add-ons\ license\ at\ the\ same\ timeby\ DeleteLicenseAction\ with\ Hybrid\ license\ UUID[uuid\=%s] = Hybrid platform license is already in use. You should remove Hybrid platform license and hybrid add-ons license at the same time by DeleteLicenseAction with Hybrid license UUID[uuid={0}] +The\ system's\ thumbprint\ has\ changed.\\n\ Detailed\ errors\:\ %s.\\n\ If\ you\ are\ setting\ up\ a\ new\ system\ or\ changing\ an\ existing\ system,\ please\ follow\ the\ commands\ below\:\\n\ 1.\ run\ `zstack-ctl\ clear_license`\ to\ clear\ and\ backup\ old\ license\ files\\n\ \ \ \ or\ delete\ the\ license\ file\ on\ path\ %s\ 2.\ contact\ sales@zstack.io\ to\ apply\ a\ license;\\n\ 3.\ run\ `zstack-ctl\ install_license\ -f\ path/to/your/license`;\\n\ 4.\ run\ `zstack-ctl\ start`\ to\ start\ management\ node.\\n = The system''s thumbprint has changed.\\n Detailed errors: {0}.\\n If you are setting up a new system or changing an existing system, please follow the commands below:\\n 1. run `zstack-ctl clear_license` to clear and backup old license files\\n or delete the license file on path {1} 2. contact sales@zstack.io to apply a license;\\n 3. 
run `zstack-ctl install_license -f path/to/your/license`;\\n 4. run `zstack-ctl start` to start management node.\\n Unexpected\ thumbprint = Unexpected thumbprint Platform\ license\ expired. = Platform license expired. Found\ Xinchuang\ host,\ but\ the\ type\ of\ license\ does\ not\ match. = Found Xinchuang host, but the type of license does not match. @@ -2187,6 +2571,7 @@ invalid\ volume\ IOPS[%s]\ is\ larger\ than\ %d = invalid volume IOPS[{0}] is la L3\ network[uuid\:%s]\ not\ found.\ Please\ correct\ your\ system\ tag[%s]\ of\ static\ IP = L3 network[uuid:{0}] not found. Please correct your system tag[{1}] of static IP Unknown\ code[%s]\ of\ Security\ Level = Unknown code[{0}] of Security Level [%s]\ is\ not\ a\ standard\ cidr = [{0}] is not a standard cidr +the\ host[uuid\:%s]'s\ operating\ system\ %s\ %s\ is\ too\ old,\ the\ QEMU\ doesn't\ support\ QoS\ of\ network\ or\ disk\ IO.\ Please\ choose\ another\ instance\ offering\ with\ no\ QoS\ configuration = the host[uuid:{0}]''s operating system {1} {2} is too old, the QEMU doesn''t support QoS of network or disk IO. 
Please choose another instance offering with no QoS configuration invalid\ value[%s],\ it\ must\ be\ a\ double\ greater\ than\ 0 = invalid value[{0}], it must be a double greater than 0 invalid\ value[%s],\ it\ must\ be\ a\ double\ between\ (0,\ 1] = invalid value[{0}], it must be a double between (0, 1] invalid\ value[%s],\ it's\ not\ a\ double = invalid value[{0}], it''s not a double @@ -2201,6 +2586,7 @@ can\ not\ find\ node\ A\ address\ info\ from\ bootstrap\ agent = can not find no can\ not\ get\ bootstrap\ job\ %s\ result\ after\ 900s = can not get bootstrap job {0} result after 900s curl\ bootstrap\ agent\ finished,\ return\ code\:\ %s,\ stdout\:\ %s,\ stderr\:\ %s = curl bootstrap agent finished, return code: {0}, stdout: {1}, stderr: {2} VM\ instance[uuid\:\ %s]\ not\ found = VM instance[uuid: {0}] not found +failed\ to\ create\ cache\ for\ templated\ vmInstance\ %s,\ because\ %s = failed to create cache for templated vmInstance {0}, because {1} all\ management\ node\ update\ factory\ mode\ failed,\ details\:\ %s = all management node update factory mode failed, details: {0} node\ A\ update\ factory\ mode\ failed,\ details\:\ %s = node A update factory mode failed, details: {0} some\ node\ on\ factory\ mode\ exists,\ detail\ of\ arping\:\ %s = some node on factory mode exists, detail of arping: {0} @@ -2216,9 +2602,13 @@ API[%s]\ is\ not\ allowed\ for\ the\ ZSV\ license = API[{0}] is not allowed for API[%s]\ is\ not\ allowed\ for\ the\ community-source\ license,\ please\ apply\ an\ enterprise\ license = API[{0}] is not allowed for the community-source license, please apply an enterprise license Shareable\ Volume[uuid\:%s]\ has\ already\ been\ attached\ to\ VM[uuid\:%s] = Shareable Volume[uuid:{0}] has already been attached to VM[uuid:{1}] shareable\ disk\ only\ support\ virtio-scsi\ type\ for\ now = shareable disk only support virtio-scsi type for now +shareable\ volume(s)[uuid\:\ %s]\ attached,\ not\ support\ to\ group\ snapshot. 
= shareable volume(s)[uuid: {0}] attached, not support to group snapshot. the\ license\ has\ been\ expired,\ please\ renew\ it = the license has been expired, please renew it invalid\ volume[%s]\ iothread\ pin[%s]! = invalid volume[{0}] iothread pin[{1}]! Failed\ set\ iothread[%d]\ pin[%s]\ on\ vm[%s]\:\ %s. = Failed set iothread[{0}] pin[{1}] on vm[{2}]: {3}. +can\ not\ found\ in\ used\ snapshot\ tree\ of\ volume[uuid\:\ %s].\ Maybe\ no\ snapshot\ chain\ need\ to\ validate. = can not found in used snapshot tree of volume[uuid: {0}]. Maybe no snapshot chain need to validate. +can\ not\ found\ latest\ snapshot\ from\ tree[uuid\:\ %s]\ of\ volume[uuid\:\ %s].\ Maybe\ no\ snapshot\ chain\ need\ to\ validate. = can not found latest snapshot from tree[uuid: {0}] of volume[uuid: {1}]. Maybe no snapshot chain need to validate. +can\ not\ found\ snapshots\ from\ tree[uuid\:\ %s]\ of\ volume[uuid\:\ %s].\ Maybe\ no\ snapshot\ chain\ need\ to\ validate. = can not found snapshots from tree[uuid: {0}] of volume[uuid: {1}]. Maybe no snapshot chain need to validate. Unexpectedly,\ VM[uuid\:%s]\ is\ not\ running\ any\ more,\ please\ try\ again\ later = Unexpectedly, VM[uuid:{0}] is not running any more, please try again later How\ can\ a\ Running\ VM[uuid\:%s]\ has\ no\ hostUuid? = How can a Running VM[uuid:{0}] has no hostUuid? can\ not\ take\ snapshot\ for\ volumes[%s]\ while\ volume[uuid\:\ %s]\ not\ attached = can not take snapshot for volumes[{0}] while volume[uuid: {1}] not attached @@ -2227,6 +2617,7 @@ can\ not\ take\ snapshot\ for\ volumes[%s]\ attached\ multiple\ vms[%s,\ %s] = c no\ volumes\ found = no volumes found this\ snapshot\ recording\ the\ volume\ state\ before\ resize\ to\ %fG\ is\ created\ automatically = this snapshot recording the volume state before resize to {0}G is created automatically DeleteVolumeQos\ [%s]\ ignore\ because\ of\ account\ privilege. = DeleteVolumeQos [{0}] ignore because of account privilege. 
+Cannot\ delete\ vm's\ volume\ qos\ on\ host\ %s,\ because\ the\ current\ vm\ is\ in\ state\ of\ %s,\ but\ support\ expect\ states\ are\ [%s,\ %s] = Cannot delete vm''s volume qos on host {0}, because the current vm is in state of {1}, but support expect states are [{2}, {3}] SetVolumeQosMsg\ version\ 1\ is\ deprecated,\ please\ use\ version\ 2 = SetVolumeQosMsg version 1 is deprecated, please use version 2 non\ admin\ account\ cannot\ set\ bandwidth\ more\ than\ %s = non admin account cannot set bandwidth more than {0} unknown\ message\ version. = unknown message version. @@ -2246,6 +2637,7 @@ failed\ to\ detach\ shareable\ volume[uuid\:%s]\ from\ VmInstance[uuid\:%s] = fa failed\ to\ detach\ shareable\ volume\ from\ VmInstance\:[\\n%s] = failed to detach shareable volume from VmInstance because:[\\n{0}] unsupported\ operation\ for\ setting\ root\ volume[%s]\ multiQueues. = unsupported operation for setting root volume[{0}] multiQueues. unsupported\ operation\ for\ setting\ virtio-scsi\ volume[%s]\ multiQueues. = unsupported operation for setting virtio-scsi volume[{0}] multiQueues. +ZStack\ has\ been\ paused,\ reject\ all\ API\ which\ are\ not\ read\ only.\ If\ you\ really\ want\ to\ call\ it\ and\ known\ the\ consequence,\ add\ '%s'\ into\ systemTags. = ZStack has been paused, reject all API which are not read only. If you really want to call it and know the consequence, add ''{0}'' into systemTags. 
the\ current\ version\ of\ license\ does\ not\ support\ modifying\ this\ global\ config\ [name\:%s] = the current version of license does not support modifying this global config [name:{0}] the\ current\ version\ of\ license\ does\ not\ support\ modifying\ this\ resource\ config\ [name\:%s] = the current version of license does not support modifying this resource config [name:{0}] cannot\ find\ mode\ from\ null\ VolumeQos = cannot find mode from null VolumeQos @@ -2256,6 +2648,7 @@ the\ resource[type\:%s]\ doesn't\ have\ any\ monitoring\ items = the resource[ty the\ resource[uuid\:%s]\ doesn't\ belong\ to\ the\ account[uuid\:%s] = the resource[uuid:{0}] doesn''t belong to the account[uuid:{1}] cannot\ find\ type\ for\ the\ resource[uuid\:%s] = cannot find type for the resource[uuid:{0}] no\ monitoring\ item\ found\ for\ the\ resourceType[%s]\ and\ item[%s] = no monitoring item found for the resourceType[{0}] and item[{1}] +A\ resource[name\:{resourceName},\ uuid\:{resourceUuid},\ type\:{resourceType}]'s\ monitoring\ trigger[uuid\:{triggerUuid}]\ changes\ status\ to\ {triggerStatus} = A resource[name:'{resourceName}', uuid:'{resourceUuid}', type:'{resourceType}']''s monitoring trigger[uuid:'{triggerUuid}'] changes status to '{triggerStatus}' \\n\=\=\=\ BELOW\ ARE\ DETAILS\ OF\ THE\ PREVIOUS\ ALERT\ \=\=\= = \\n=== BELOW ARE DETAILS OF THE PREVIOUS ALERT === \\nalert\ details\: = \\nalert details: \\ncondition\:\ {itemName}\ {operator}\ {threshold} = \\ncondition: '{itemName}' '{operator}' '{threshold}' @@ -2265,6 +2658,7 @@ VM\ CPU\ utilization = VM CPU utilization The\ problem\ may\ be\ caused\ by\ an\ incorrect\ user\ name\ or\ password\ or\ email\ permission\ denied = The problem may be caused by an incorrect user name or password or email permission denied Couldn't\ connect\ to\ host,\ port\:\ %s,\ %d.\ The\ problem\ may\ be\ caused\ by\ an\ incorrect\ smtpServer\ or\ smtpPort = Couldn''t connect to host, port: {0}, {1}. 
The problem may be caused by an incorrect smtpServer or smtpPort conflict\ alert\ rule[%s],\ there\ has\ been\ a\ rule[%s]\ with\ the\ same\ name = conflict alert rule[{0}], there has been a rule[{1}] with the same name +ALERT\:\\n\ resource[name\:\ %s,\ uuid\:\ %s,\ type\:\ %s]\\nevent\:\ %s\ %s\ %s\\ncurrent\ value\:\ %s\\nduration\:\ %s\ seconds\\n = ALERT:\\n resource[name: {0}, uuid: {1}, type: {2}]\\nevent: {3} {4} {5}\\ncurrent value: {6}\\nduration: {7} seconds\\n the\ relativeTime[%s]\ is\ invalid,\ it\ must\ be\ in\ format\ of,\ for\ example,\ 10s,\ 1h = the relativeTime[{0}] is invalid, it must be in format of, for example, 10s, 1h the\ relativeTime[%s]\ is\ invalid,\ it's\ too\ big = the relativeTime[{0}] is invalid, it''s too big CPU\ number = CPU number @@ -2330,6 +2724,7 @@ cannot\ sr-iov\ virtualize\ pci\ devices\ on\ interface[uuid\:%s]\ that\ are\ be pci\ device[uuid\:%s]\ doesn't\ exist\ or\ is\ not\ sriov\ virtualized = pci device[uuid:{0}] doesn''t exist or is not sriov virtualized virtual\ pci\ devices\ generated\ from\ pci\ devices\ in\ host[uuid\:%s]\ still\ attached\ to\ vm = virtual pci devices generated from pci devices in host[uuid:{0}] still attached to vm sub-devices\ of\ pci\ device[uuid\:%s]\ are\ attached\ to\ paused\ VMs[uuids\:%s],\ please\ detach\ them\ first = sub-devices of pci device[uuid:{0}] are attached to paused VMs[uuids:{1}], please detach them first +pci\ device[uuid\:%s]\ cannot\ be\ virtualized\ into\ mdevs,\ make\ sure\ it's\ enabled\ and\ un-attached = pci device[uuid:{0}] cannot be virtualized into mdevs, make sure it''s enabled and un-attached pci\ device[uuid\:%s]\ cannot\ be\ virtualized\ by\ mdev\ spec[uuid\:%s] = pci device[uuid:{0}] cannot be virtualized by mdev spec[uuid:{1}] pci\ device[uuid\:%s]\ is\ not\ virtualized\ into\ mdevs = pci device[uuid:{0}] is not virtualized into mdevs mdev\ devices\ generated\ from\ pci\ device[uuid\:%s]\ still\ attached\ to\ vm = mdev devices generated from pci 
device[uuid:{0}] still attached to vm @@ -2337,6 +2732,7 @@ the\ host[uuid\:%s]\ that\ pci\ device[uuid\:%s]\ in\ is\ not\ Connected = the h please\ umount\ all\ GPU\ devices\ of\ the\ vm[%s]\ and\ try\ again = please umount all GPU devices of the vm[{0}] and try again please\ umount\ all\ vGPU\ devices\ of\ the\ vm[%s]\ and\ try\ again = please umount all vGPU devices of the vm[{0}] and try again please\ umount\ other\ pci\ devices\ of\ the\ vm[%s]\ and\ try\ again = please umount other pci devices of the vm[{0}] and try again +specified\ pci\ devices\ are\ not\ on\ the\ same\ host\:\ pci\ device[uuid\:\ %s]\ on\ host[uuid\:\ %s]\ while\ pci\ device[uuid\:\ %s]\ on\ host[uuid\:\ %s] = specified pci devices are not on the same host: pci device[uuid: {0}] on host[uuid: {1}] while pci device[uuid: {2}] on host[uuid: {3}] the\ PCI\ devices[uuid\:%s]\ is\ not\ on\ this\ host = the PCI devices[uuid:{0}] is not on this host failed\ to\ start\ vm[uuid\:%s]\ because\ not\ all\ pci\ specs[uuids\:%s]\ exist = failed to start vm[uuid:{0}] because not all pci specs[uuids:{1}] exist not\ enough\ PCI\ devices = not enough PCI devices @@ -2354,6 +2750,7 @@ pci\ device\ spec[uuid\:%s]\ doesn't\ exists = pci device spec[uuid:{0}] doesn'' illegal\ type[%s]\ for\ pci\ device\ spec,\ only\ %s\ are\ legal = illegal type[{0}] for pci device spec, only {1} are legal pci\ device[uuid\:%s]\ doesn't\ exist\ or\ is\ disabled\ for\ vm[uuid\:%s] = pci device[uuid:{0}] doesn''t exist or is disabled for vm[uuid:{1}] pci\ device[uuid\:%s]\ can\ not\ attach\ to\ vm[uuid\:%s]\ due\ to\ wrong\ status = pci device[uuid:{0}] can not attach to vm[uuid:{1}] due to wrong status +The\ host\ [%s]\ has\ failed\ to\ enter\ the\ maintenance,\ The\ vm\ [%s]\ cannot\ migrate\ automatically\ because\ it\ contains\ the\ PCI\ device = The host [{0}] has failed to enter the maintenance, The vm [{1}] cannot migrate automatically because it contains the PCI device don't\ set\ rom\ version\ if\ has\ no\ rom\ content = 
don''t set rom version if has no rom content too\ large\ pci\ device\ rom\ file = too large pci device rom file don't\ set\ rom\ content/version\ if\ you\ want\ to\ abandon\ rom\ from\ the\ spec = don''t set rom content/version if you want to abandon rom from the spec @@ -2394,9 +2791,11 @@ failed\ to\ get\ candidate\ hosts\ to\ start\ vm[uuid\:%s],\ %s = failed to get pci\ device[uuid\:%s]\ is\ known\ as\ %s,\ but\ cannot\ find\ it's\ mdev\ spec,\ so\ abort. = pci device[uuid:{0}] is known as {1}, but cannot find it''s mdev spec, so abort. failed\ to\ start\ vm[uuid\:%s]\ because\ not\ all\ mdev\ specs[uuids\:%s]\ exist = failed to start vm[uuid:{0}] because not all mdev specs[uuids:{1}] exist not\ enough\ Mdev\ devices = not enough Mdev devices +specified\ mdev\ devices\ not\ on\ same\ host\:\ mdev\ device[uuid\:\ %s]\ on\ host[uuid\:\ %s]\ while\ mdev\ device[uuid\:\ %s]\ on\ host[uuid\:\ %s] = specified mdev devices not on same host: mdev device[uuid: {0}] on host[uuid: {1}] while mdev device[uuid: {2}] on host[uuid: {3}] the\ Mdev\ devices[uuid\:%s]\ is\ not\ on\ this\ host = the Mdev devices[uuid:{0}] is not on this host IOMMU\ state\ is\ not\ enabled = IOMMU state is not enabled IOMMU\ status\ is\ not\ active = IOMMU status is not active +The\ host\ [%s]\ has\ failed\ to\ enter\ the\ maintenance,\ because\ vm[%s]\ has\ mdev\ devices\ attached\ and\ cannot\ migrate\ automatically = The host [{0}] has failed to enter the maintenance, because vm[{1}] has mdev devices attached and cannot migrate automatically failed\ to\ find\ enough\ mdev\ device\ of\ spec[uuid\:%s]\ in\ dest\ host[uuid\:%s]\ for\ vm[uuid\:%s] = failed to find enough mdev device of spec[uuid:{0}] in dest host[uuid:{1}] for vm[uuid:{2}] cannot\ find\ mdev\ device[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find mdev device[uuid:{0}], it may have been deleted mdev\ device[uuid\:%s]\ doesn't\ exist\ or\ is\ disabled\ for\ vm[uuid\:%s] = mdev device[uuid:{0}] doesn''t exist or is disabled for 
vm[uuid:{1}] @@ -2432,13 +2831,14 @@ trigger\ job[uuid\:\ %s]\ failed,\ because\ %s = trigger job[uuid: {0}] failed, trigger\ job\ group[uuid\:\ %s]\ failed,\ because\ %s = trigger job group[uuid: {0}] failed, because {1} field[%s]\ cannot\ be\ empty = field[{0}] cannot be empty the\ volume[%s]\ is\ not\ root\ volume = the volume[{0}] is not root volume +the\ vm\ of\ the\ root\ volume[%s]\ is\ not\ available.\ check\ if\ the\ vm\ exists. = the vm of the root volume[{0}] is not available. check if the vm exists. snapshotGroupMaxNumber\ \:\ %s\ format\ error\ because\ %s = snapshotGroupMaxNumber : {0} format error because {1} +the\ volume[%s]\ is\ not\ available.\ check\ if\ the\ volume\ exists. = the volume[{0}] is not available. check if the volume exists. the\ volume[%s]\ does\ not\ support\ snapshots\ retention = the volume[{0}] does not support snapshots retention snapshotMaxNumber\ \:\ %s\ format\ error\ because\ %s = snapshotMaxNumber : {0} format error because {1} +the\ vm\ of\ the\ root\ volume[%s]\ state\ in\ Destroyed.\ job\ state\ change\ is\ not\ allowed = the vm of the root volume[{0}] state in Destroyed. 
job state change is not allowed volume[uuid\:%s]\ is\ deleted,\ state\ change\ is\ not\ allowed = volume[uuid:{0}] is deleted, state change is not allowed vm[uuid\:%s]\ is\ destroyed,\ state\ change\ is\ not\ allowed = vm[uuid:{0}] is destroyed, state change is not allowed -unable\ to\ allocate\ backup\ storage\ specified\ by\ uuids\:\ %s,\ becasue\:\ %s = unable to allocate backup storage specified by uuids: {0}, becasue: {1} -No\ backup\ storage\ to\ commit\ volume\ [uuid\:\ %s] = No backup storage to commit volume [uuid: {0}] unable\ to\ commit\ backup\ storage\ specified\ by\ uuids\:\ %s = unable to commit backup storage specified by uuids: {0} unable\ to\ connect\ to\ SimpleHttpBackupStorage[url\:%s],\ because\ %s = unable to connect to SimpleHttpBackupStorage[url:{0}], because {1} Missing\ cert\ file\ for\ downloading\ image\:\ %s = Missing cert file for downloading image: {0} @@ -2455,6 +2855,7 @@ image[%s]\ not\ found\ on\ backup\ storage[%s] = image[{0}] not found on backup the\ backup\ storage[uuid\:%s]\ has\ not\ enough\ capacity[%s]\ to\ export = the backup storage[uuid:{0}] has not enough capacity[{1}] to export image\ store\ [%s]\ cannot\ add\ image,\ because\ it\ is\ used\ for\ backup\ remote = image store [{0}] cannot add image, because it is used for backup remote commercial\ license\ is\ required\ to\ use\ ImageStore = commercial license is required to use ImageStore +the\ uuid\ of\ imagestoreBackupStorage\ agent\ changed[expected\:%s,\ actual\:%s],\ it's\ most\ likely\ the\ agent\ was\ manually\ restarted.\ Issue\ a\ reconnect\ to\ sync\ the\ status = the uuid of imagestoreBackupStorage agent changed[expected:{0}, actual:{1}], it''s most likely the agent was manually restarted. 
Issue a reconnect to sync the status get\ image\ hash\ failed,\ because\:%s = get image hash failed, because:{0} unable\ to\ reconnect\ target\ server\:\ %s,\ detail\ error\ info\:\ %s = unable to reconnect target server: {0}, detail error info: {1} miss\ image\ path\ on\ bs[%s] = miss image path on bs[{0}] @@ -2505,6 +2906,8 @@ the\ volume[uuid\:%s]\ is\ still\ attached\ on\ vm[uuid\:%s],\ please\ detach\ i cannot\ migrate\ data\ volume[uuid\:%s]\ bewteen\ sharedblock\ primary\ storages\ when\ vm[vmuuid\:%s]\ instance\ is\ not\ stopped. = cannot migrate data volume[uuid:{0}] bewteen sharedblock primary storages when vm[vmuuid:{1}] instance is not stopped. do\ not\ support\ storage\ migration\ while\ shared\ volume[uuid\:\ %s,\ name\:\ %s]\ attached = do not support storage migration while shared volume[uuid: {0}, name: {1}] attached Cannot\ migrate\ volume\ from\ %s\ to\ %s. = Cannot migrate volume from {0} to {1}. +can\ not\ migrate\ volume[%s],\ because\ volume\ state\ is\ Disabled = can not migrate volume[{0}], because volume state is Disabled +there\ are\ not\ enough\ capacity\ for\ vm[uuid\:\ %s]\ storage\ migration,\ required\ capacity(include\ image\ cache)\:\ %s,\ current\ available\ physical\ capacity\:\ %s = there are not enough capacity for vm[uuid: {0}] storage migration, required capacity(include image cache): {1}, current available physical capacity: {2} not\ support\ vm\ state[%s]\ to\ do\ storage\ migration = not support vm state[{0}] to do storage migration unsupported\ storage\ migration\ type\:\ from\ %s\ to\ %s = unsupported storage migration type: from {0} to {1} not\ support\ to\ cancel\ %s = not support to cancel {0} @@ -2516,12 +2919,14 @@ Failed\ to\ migrate\ Image\ %s\ from\ BS\ %s\ to\ BS\ %s.\ cause\:\ %s = Failed can\ not\ find\ volume\ path\ from\ snapshot\ install\ path[%s]\ by\ regex[%s] = can not find volume path from snapshot install path[{0}] by regex[{1}] vm[uuid\:%s]\ storage\ migration\ long\ job[uuid\:%s]\ failed\ because\ 
management\ node\ was\ restarted = vm[uuid:{0}] storage migration long job[uuid:{1}] failed because management node was restarted The\ type\ [%s]\ of\ volume\ is\ invalid. = The type [{0}] of volume is invalid. +found\ trashId(%s)\ in\ primaryStorage\ [%s]\ for\ the\ migrate\ installPath[%s].\ please\ clean\ it\ first\ by\ 'APICleanUpTrashOnPrimaryStorageMsg'\ if\ you\ insist\ to\ migrate\ the\ volume[%s] = found trashId({0}) in primaryStorage [{1}] for the migrate installPath[{2}]. please clean it first by ''APICleanUpTrashOnPrimaryStorageMsg'' if you insist to migrate the volume[{3}] cannot\ find\ any\ connected\ host\ to\ perform\ the\ storage\ migration\ operation = cannot find any connected host to perform the storage migration operation found\ trashId(%s)\ in\ PrimaryStorage\ [%s]\ for\ the\ migrate\ installPath[%s].\ Please\ clean\ it\ first\ by\ 'APICleanUpTrashOnPrimaryStorageMsg'\ if\ you\ insist\ to\ migrate\ the\ volume[%s] = found trashId({0}) in PrimaryStorage [{1}] for the migrate installPath[{2}]. Please clean it first by ''APICleanUpTrashOnPrimaryStorageMsg'' if you insist to migrate the volume[{3}] found\ related\ trash\ paths(%s)\ in\ PrimaryStorage\ [%s]\ for\ the\ migrate\ installPath[%s].\ Please\ clean\ them\ first\ by\ 'APICleanUpTrashOnPrimaryStorageMsg'\ if\ you\ insist\ to\ migrate\ the\ volume[%s] = found related trash paths({0}) in PrimaryStorage [{1}] for the migrate installPath[{2}]. Please clean them first by ''APICleanUpTrashOnPrimaryStorageMsg'' if you insist to migrate the volume[{3}] -couldn’t\ find\ any\ BackupStorage\ that\ is\ connected\ and\ enabled\ for\ commiting\ volume\ [uuid\:%s] = couldn’t find any BackupStorage that is connected and enabled for commiting volume [uuid:{0}] +volume[uuid\:%s]\ has\ image[uuid\:%s]\ dependency,\ other\ dependency\ image[%s] = volume[uuid:{0}] has image[uuid:{1}] dependency, other dependency image[{2}] CephPrimaryStorage[%s]\ not\ existed! = CephPrimaryStorage[{0}] not existed! 
current\ license[%s]\ is\ not\ valid\ license\ while\ download\ from\ imagestore\ backupstorage = current license[{0}] is not valid license while download from imagestore backupstorage +The\ source\ vm\ has\ local\ data\ volume\ on\ host[uuid\:\ %s],but\ in\ fast\ clone\ api\ msg\ try\ to\ clone\ vm\ to\ host[%s],\ which\ is\ impossible\ for\ fast\ clone\ feature. = The source vm has local data volume on host[uuid: {0}],but in fast clone api msg try to clone vm to host[{1}], which is impossible for fast clone feature. System\ can't\ find\ imagestore\ backup\ Storage.\ Please\ do\ not\ set\ imagestore\ backup\ Storage\ server\ IP\ to\ localhost(127.*.*.*), = System can''t find imagestore backup Storage. Please do not set imagestore backup Storage server IP to localhost(127.*.*.*), %s\ failed\ to\ download\ bits\ from\ the\ imagestore\ backup\ storage[hostname\:%s,\ path\:\ %s]\ to\ the\ local\ primary\ storage[uuid\:%s,\ path\:\ %s],\ %s = {0} failed to download bits from the imagestore backup storage[hostname:{1}, path: {2}] to the local primary storage[uuid:{3}, path: {4}], {5} failed\ to\ upload\ bits\ from\ the\ local\ storage[uuid\:%s,\ path\:%s]\ to\ image\ store\ [hostname\:%s],\ %s = failed to upload bits from the local storage[uuid:{0}, path:{1}] to image store [hostname:{2}], {3} @@ -2532,16 +2937,23 @@ failed\ to\ get\ primaryStorage[%s]\ license\ info,\ because\ no\ data\ returned failed\ to\ get\ primaryStorage[%s]\ license\ info,\ because\ expired_time\ is\ null = failed to get primaryStorage[{0}] license info, because expired_time is null failed\ to\ parse\ the\ date\ format[%s]\ of\ the\ primaryStorage[%s]\ license\ info = failed to parse the date format[{0}] of the primaryStorage[{1}] license info failed\ to\ get\ primaryStorage[%s]\ license\ info,\ because\ the\ returned\ data\ does\ not\ have\ an\ active\ license = failed to get primaryStorage[{0}] license info, because the returned data does not have an active license +the\ current\ 
primaryStorage\ %s\ does\ not\ have\ a\ third-party\ token\ set,\ and\ the\ block\ volume\ cannot\ be\ created\ temporarily = the current primaryStorage {0} does not have a third-party token set, and the block volume cannot be created temporarily +the\ current\ primaryStorage\ %s\ is\ not\ Ceph\ type,\ can\ not\ get\ access\ path = the current primaryStorage {0} is not Ceph type, can not get access path +Ceph\ type\ block\ volume\ accessPathId,\ accessPathIqn\ cannot\ be\ null = Ceph type block volume accessPathId, accessPathIqn cannot be null +current\ primary\ storage\ type\ not\ support\ block\ volume,\ supporttype\ has\ %s = current primary storage type not support block volume, supporttype has {0} no\ block\ volume\ factory\ found\ for\ vendor\:\ %s = no block volume factory found for vendor: {0} iothread\ need\ qemu\ version\ >\=\ %s,\ but\ %s\ on\ host[%s]. = iothread need qemu version >= {0}, but {1} on host[{2}]. iothread\ need\ libvirt\ version\ >\=\ %s,\ but\ %s\ on\ host[%s]. = iothread need libvirt version >= {0}, but {1} on host[{2}]. root\ volume[%s]\ cannot\ set\ iothreadpin. = root volume[{0}] cannot set iothreadpin. current\ iothread\ id[%s]\ is\ not\ the\ same\ as\ attached\ vol[%s]\ iothread[%s]. = current iothread id[{0}] is not the same as attached vol[{1}] iothread[{2}]. +snapshot\ validation\ is\ unsupported\ for\ volume[uuid\:\ %s].\ Volume\ should\ be\ attached\ to\ vm = snapshot validation is unsupported for volume[uuid: {0}]. Volume should be attached to vm +snapshot\ validation\ is\ unsupported\ for\ volume[uuid\:\ %s].\ Attached\ vm\ is\ not\ in\ state\ of\ [%s,\ %s] = snapshot validation is unsupported for volume[uuid: {0}]. 
Attached vm is not in state of [{1}, {2}] volume[uuid\:%s]\ can\ not\ found = volume[uuid:{0}] can not found not\ support\ take\ snapshots\ volume[uuid\:%s,\ uuid\:%s]\ on\ different\ vms[uuid\:%s,\ uuid\:%s] = not support take snapshots volume[uuid:{0}, uuid:{1}] on different vms[uuid:{2}, uuid:{3}] volume[uuid\:%s]\ is\ not\ ready = volume[uuid:{0}] is not ready state\ of\ vm[uuid\:\ %s]\ is\ %s,\ not\ allowed\ to\ take\ snapshots = state of vm[uuid: {0}] is {1}, not allowed to take snapshots volume[uuid\:%s]\ is\ not\ data\ volume = volume[uuid:{0}] is not data volume +can\ not\ resize\ volume[%s],\ because\ volume\ state\ is\ Disabled = can not resize volume[{0}], because volume state is Disabled At\ least\ one\ of\ vmInstanceUuid\ or\ uuid\ should\ be\ set = At least one of vmInstanceUuid or uuid should be set no\ volume[uuid\:%s,\ vmInstanceUuid\:%s]\ can\ be\ found = no volume[uuid:{0}, vmInstanceUuid:{1}] can be found volume[uuid\:%s]\ is\ not\ root\ volume = volume[uuid:{0}] is not root volume @@ -2555,6 +2967,9 @@ Minimum\ increase\ size\ should\ be\ larger\ than\ 4MB = Minimum increase size s Expansion\ operation\ not\ allowed\ at\ host\ disable = Expansion operation not allowed at host disable Expansion\ operation\ not\ allowed\ at\ all\ host\ disable = Expansion operation not allowed at all host disable shared\ volume[uuid\:\ %s]\ has\ attached\ to\ not\ stopped\ vm\ instances[uuids\:\ %s] = shared volume[uuid: {0}] has attached to not stopped vm instances[uuids: {1}] +name\:\ [%s]\ already\ exists,\ block\ volume\ name\ cannot\ be\ duplicated\ on\ type[%s]\ primarystorage = name: [{0}] already exists, block volume name cannot be duplicated on type[{1}] primarystorage +[protocol]\ parameter\ is\ null,\ type[%s]\ primarystorage\ must\ set\ block\ volume\ protocol = [protocol] parameter is null, type[{0}] primarystorage must set block volume protocol +current\ [%s]\ primary\ storage\ not\ support\ [%s]\ type\ protocol,\ please\ add\ protocol\ to\ 
storage\ first = current [{0}] primary storage not support [{1}] type protocol, please add protocol to storage first ExponBlockVolume[uuid\:%s]\ not\ found = ExponBlockVolume[uuid:{0}] not found path\ error = path error unable\ to\ find\ any\ TemplateConfigs\:\ [templateUuid\:\ %s] = unable to find any TemplateConfigs: [templateUuid: {0}] @@ -2583,6 +2998,7 @@ the\ usb\ devices[uuid\:%s,\ name\:%s]\ in\ host[uuid\:%s,\ name\:%s]\ is\ occup the\ usb\ device[uuid\:%s]\ has\ already\ been\ attached\ to\ same\ vm[uuid\:%s] = the usb device[uuid:{0}] has already been attached to same vm[uuid:{1}] the\ usb\ device[uuid\:%s]\ has\ already\ been\ attached\ to\ another\ vm[uuid\:%s] = the usb device[uuid:{0}] has already been attached to another vm[uuid:{1}] PassThrough\ only\ support\ use\ on\ vm\ running\ host = PassThrough only support use on vm running host +cannot\ attach\ the\ usb\ device[uuid\:%s]\ to\ vm[uuid\:%s],\ possibly\ reasons\ include\:\ the\ device\ is\ not\ enabled\ or\ had\ been\ attached\ to\ a\ vm,\ or\ the\ device\ and\ the\ vm\ are\ not\ on\ same\ host. = cannot attach the usb device[uuid:{0}] to vm[uuid:{1}], possibly reasons include: the device is not enabled or had been attached to a vm, or the device and the vm are not on same host. usb\ is\ already\ bound\ to\ vm[uuid\:%s]\ and\ cannot\ be\ bound\ to\ other\ vm = usb is already bound to vm[uuid:{0}] and cannot be bound to other vm vm[%s]\ cannot\ start\ because\ usb\ redirect\ host\ is\ not\ connected = vm[{0}] cannot start because usb redirect host is not connected cannot\ migrate\ vm[uuid\:%s]\ because\ there\ are\ usb\ devices\ attached\ by\ passthrough = cannot migrate vm[uuid:{0}] because there are usb devices attached by passthrough @@ -2606,6 +3022,7 @@ Failed\ to\ find\ vm[uuid\=%s]\ in\ ESX\ host[uuid\=%s] = Failed to find vm[uuid Failed\ to\ update\ ESX\ VM[uuid\=%s]\ configuration.\ See\ more\ details\ in\ the\ log. = Failed to update ESX VM[uuid={0}] configuration. 
See more details in the log. VM\ [%s]\ not\ found\ in\ vCenter = VM [{0}] not found in vCenter failed\ to\ power\ on\ VM,\ task\ status\:\ %s = failed to power on VM, task status: {0} +console\ password\ is\ not\ supported\ by\ vm[uuid\:%s]\ on\ ESXHost[ESXI\ version\:%s] = console password is not supported by vm[uuid:{0}] on ESXHost[ESXI version:{1}] vCenter\ login\ name\ expected. = vCenter login name expected. domainName[%s]\ is\ neither\ an\ IPv4\ address\ nor\ a\ valid\ hostname = domainName[{0}] is neither an IPv4 address nor a valid hostname vCenter\ [domainName\:%s]\ has\ been\ added = vCenter [domainName:{0}] has been added @@ -2630,6 +3047,10 @@ There\ are\ tasks\ running\ on\ the\ VCenter[uuid\:%s],\ please\ try\ again\ lat VCenter[uuid\:%s]\ not\ found\:\ = VCenter[uuid:{0}] not found: Login\ failed,\ please\ check\ your\ login\ parameters. = Login failed, please check your login parameters. connect\ %s\ failed\:\ %s = connect {0} failed: {1} +Login\ to\ vCenter\ [%s]\ failed\ with\ user\ [%s],please\ check\ your\ network\ connection\ and\ credential. = Login to vCenter [{0}] failed with user [{1}],please check your network connection and credential. +Parse\ response\ failed\ from\ vCenter\ [%s],please\ check\ the\ port\ number[%d]. = Parse response failed from vCenter [{0}],please check the port number[{1}]. +SSL\ handshake\ failed\ with\ vCenter\ [%s],because\ insecure\ TLS\ 1.0\ is\ used.\ Manually\ enabled\ TLS\ 1.0\ in\ jdk\ configuration\ if\ needed. = SSL handshake failed with vCenter [{0}],because insecure TLS 1.0 is used. Manually enabled TLS 1.0 in jdk configuration if needed. +SSL\ handshake\ failed\ with\ vCenter\ [%s],please\ check\ the\ port\ number[%d]. = SSL handshake failed with vCenter [{0}],please check the port number[{1}]. 
No\ clustered\ compute\ resource\ found = No clustered compute resource found No\ dvSwitch\ or\ qualified\ vSwitch\ found = No dvSwitch or qualified vSwitch found Datastore\ %s\ not\ found\ for\ vCenter\ %s = Datastore {0} not found for vCenter {1} @@ -2645,6 +3066,7 @@ No\ file\ Datacenter = No file Datacenter failed\ to\ get\ VM\ from\ installPath\:\ %s = failed to get VM from installPath: {0} failed\ to\ get\ VM[%s]\ root\ disk\ usage = failed to get VM[{0}] root disk usage failed\ to\ connect\ to\ vCenter\:\ %s\:\ %s = failed to connect to vCenter: {0}: {1} +VCenter[uuid\=%s]\ is\ Disabled.\ You\ can\ only\ perform\ read-only\ operations\ on\ this\ VCenter.\ If\ you\ want\ to\ make\ configuration\ changes\ to\ it,\ you\ need\ to\ update\ config\ by\ UpdateVCenterAction\ {uuid\=%s\ state\=Enabled} = VCenter[uuid={0}] is Disabled. You can only perform read-only operations on this VCenter. If you want to make configuration changes to it, you need to update config by UpdateVCenterAction '{uuid={1}' state=Enabled} VCenter[uuid\=%s]\ are\ Disabled.\ You\ can\ only\ perform\ read-only\ operations\ on\ these\ VCenter. = VCenter[uuid={0}] are Disabled. You can only perform read-only operations on these VCenter. VCenter\ not\ found = VCenter not found VCenter[%s]\ is\ not\ in\ operation\ status,\ current\ status\:\ %s = VCenter[{0}] is not in operation status, current status: {1} @@ -2685,17 +3107,23 @@ cannot\ find\ the\ route\ table\ [uuid\:%s] = cannot find the route table [uuid: # In Module: ministorage Invalid\ resourceUuid\ %s = Invalid resourceUuid {0} primary\ storage\ uuid\ cannot\ be\ null. = primary storage uuid cannot be null. +volume[uuid\:%s]\ has\ been\ attached\ some\ VM(s)[uuid\:%s]\ which\ are\ not\ Stopped\ and\ not\ running\ on\ the\ specific\ host. = volume[uuid:{0}] has been attached some VM(s)[uuid:{1}] which are not Stopped and not running on the specific host. VM[uuid\:%s]\ are\ not\ Stopped\ and\ not\ running\ on\ the\ specific\ host. 
= VM[uuid:{0}] are not Stopped and not running on the specific host. Fail\ to\ %s,\ because\ host(s)[uuid\:%s]\ are\ not\ enable\ and\ not\ in\ connected\ status. = Fail to {0}, because host(s)[uuid:{1}] are not enabled and not in connected status. cannot\ find\ proper\ hypervisorType\ for\ primary\ storage[uuid\:%s]\ to\ handle\ image\ format\ or\ volume\ format[%s] = cannot find proper hypervisorType for primary storage[uuid:{0}] to handle image format or volume format[{1}] ResourceType\ [%s]\ of\ APIRecoverResourceSplitBrainMsg\ is\ invalid. = ResourceType [{0}] of APIRecoverResourceSplitBrainMsg is invalid. +the\ mini\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = the mini storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for instantiating the volume can\ not\ determine\ which\ host = can not determine which host +the\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = the primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected no\ connected\ host\ found,\ mini\ storage\ failed = no connected host found, mini storage failed +host[uuid\:\ %s]\ of\ mini\ primary\ storage[uuid\:\ %s]\ doesn't\ have\ enough\ capacity[current\:\ %s\ bytes,\ needed\:\ %s] = host[uuid: {0}] of mini primary storage[uuid: {1}] doesn''t have enough capacity[current: {2} bytes, needed: {3}] can\ not\ get\ cluster\ uuid\ of\ volume\ %s = can not get cluster uuid of volume {0} -no\ connected\ host\ found\ in\ the\ cluster[uuid\:%s] = no connected host found in the cluster[uuid:{0}] no\ backup\ storage\ can\ get\ image[uuid\:%s]\ of\ volume[uuid\:%s] = no backup storage can get image[uuid:{0}] of volume[uuid:{1}] image[uuid\:\ %s]\ has\ no\ image\ ref\ with\ backup\ storage[uuid\:\ %s] = image[uuid: {0}] has no image ref with backup
storage[uuid: {1}] cannot\ find\ backup\ storage[uuid\:%s] = cannot find backup storage[uuid:{0}] +can\ not\ find\ any\ available\ host\ to\ resize\ volume[uuid\:\ %s]\ on\ mini\ storage[uuid\:\ %s] = can not find any available host to resize volume[uuid: {0}] on mini storage[uuid: {1}] +volume[uuid\:%s]\ replication\ is\ syncing\ data,\ please\ wait\ until\ it\ is\ finished. = volume[uuid:{0}] replication is syncing data, please wait until it is finished. +replication\ network\ status\ of\ volume[uuid\:%s]\ run\ into\ StandAlone,\ but\ host\ are\ all\ Connected,\ please\ recover\ it\ first. = replication network status of volume[uuid:{0}] run into StandAlone, but host are all Connected, please recover it first. Invalid\ path\ string\ %s = Invalid path string {0} Still\ cache\ volume\ exists\ on\ ps[uuid\:%s]\ can\ not\ update\ cache\ volume\ url = Still cache volume exists on ps[uuid:{0}] can not update cache volume url can\ not\ find\ replication\ of\ volume\ %s\ on\ host\ %s = can not find replication of volume {0} on host {1} @@ -2707,15 +3135,33 @@ can\ not\ allocate\ storage\ sync\ port\ on\ host\ %s\:\ %s = can not allocate s expect\ operate\ on\ hosts[%s]\ but\ only\ host\ %s\ are\ connected\ and\ enabled = expect operate on hosts[{0}] but only host {1} are connected and enabled mini\ storage[uuid\:%s]\ has\ to\ be\ empty\ before\ restoring\ bits\ from\ zbox.\ please\ clean\ it\ up. = mini storage[uuid:{0}] has to be empty before restoring bits from zbox. please clean it up. 
+# In Module: multicast-router +Rendezvous\ Point\ [%s]\ is\ not\ a\ unicast\ address = Rendezvous Point [{0}] is not a unicast address +group\ address\ [%s]\ is\ not\ a\ multicast\ address = group address [{0}] is not a multicast address +rp\ address\ pair\ [%s\:\ %s]\ already\ existed\ for\ multicast\ router\ [uuid\:%s] = rp address pair [{0}: {1}] already existed for multicast router [uuid:{2}] +rp\ address\ tuple\ [%s\ \:\ %s]\ is\ not\ existed\ for\ multicast\ router\ [uuid\:%s] = rp address tuple [{0} : {1}] does not exist for multicast router [uuid:{2}] +multicastRouter[uuid\:%s]\ has\ not\ been\ attached\ to\ vpc\ router = multicastRouter[uuid:{0}] has not been attached to vpc router +multicast\ already\ enabled\ on\ vpc\ router\ uuid[\:%s] = multicast already enabled on vpc router uuid[:{0}] +vpc\ router\ for\ multicast\ router\ [uuid\:%s]\ has\ been\ deleted = vpc router for multicast router [uuid:{0}] has been deleted +multicast\ router\ [uuid\:%s]\ is\ not\ attached\ to\ Vpc\ Router = multicast router [uuid:{0}] is not attached to Vpc Router +multicast\ router\ [uuid\:%s]\ has\ been\ delete\ during\ enable\ multilcast\ on\ backend = multicast router [uuid:{0}] has been deleted during enabling multicast on backend + +# In Module: network-plugin +apply\ gratuitous\ arp\ error,\ because\:%s = apply gratuitous arp error, because:{0} +release\ gratuitous\ arp\ error,\ because\:%s = release gratuitous arp error, because:{0} + # In Module: network l2Network[uuid\:%s]\ has\ attached\ to\ cluster[uuid\:%s],\ can't\ attach\ again = l2Network[uuid:{0}] has attached to cluster[uuid:{1}], can''t attach again +could\ not\ attach\ l2\ network,\ because\ there\ is\ another\ network\ [uuid\:%s]\ on\ physical\ interface\ [%s]\ with\ different\ vswitch\ type = could not attach l2 network, because there is another network [uuid:{0}] on physical interface [{1}] with different vswitch type l2Network[uuid\:%s]\ has
not attached to cluster[uuid:{1}] type[%s]\ should\ be\ attached\ to\ all\ host = type[{0}] should be attached to all host +could\ not\ attach\ l2Network[uuid\:%s]\ to\ host[uuid\:%s]\ which\ is\ in\ the\ premaintenance\ or\ maintenance\ state = could not attach l2Network[uuid:{0}] to host[uuid:{1}] which is in the premaintenance or maintenance state l2Network[uuid\:%s]\ has\ not\ attached\ to\ cluster\ of\ host[uuid\:%s] = l2Network[uuid:{0}] has not attached to cluster of host[uuid:{1}] l2Network[uuid\:%s]\ has\ not\ attached\ to\ host[uuid\:%s] = l2Network[uuid:{0}] has not attached to host[uuid:{1}] unsupported\ l2Network\ type[%s] = unsupported l2Network type[{0}] unsupported\ vSwitch\ type[%s] = unsupported vSwitch type[{0}] l2\ network[type\:%s]\ does\ not\ support\ update\ virtual\ network\ id = l2 network[type:{0}] does not support update virtual network id +cannot\ update\ virtual\ network\ id\ for\ l2Network[uuid\:%s]\ because\ it\ only\ supports\ an\ L2Network\ that\ is\ exclusively\ attached\ to\ a\ kvm\ cluster = cannot update virtual network id for l2Network[uuid:{0}] because it only supports an L2Network that is exclusively attached to a kvm cluster there's\ no\ host\ in\ cluster[uuid\:\ %s],\ but\ hostParams\ is\ set = there''s no host in cluster[uuid: {0}], but hostParams is set hostUuid\ can\ not\ be\ null\ in\ HostParam = hostUuid can not be null in HostParam host[uuid\:\ %s]\ is\ not\ in\ cluster[uuid\:\ %s] = host[uuid: {0}] is not in cluster[uuid: {1}] @@ -2730,6 +3176,17 @@ There\ has\ been\ a\ l2Network[uuid\:%s,\ name\:%s]\ attached\ to\ cluster[uuid\ There\ has\ been\ a\ L2VlanNetwork[uuid\:%s,\ name\:%s]\ attached\ to\ cluster[uuid\:%s]\ that\ has\ physical\ interface[%s],\ vlan[%s].\ Failed\ to\ attach\ L2VlanNetwork[uuid\:%s] = There has been a L2VlanNetwork[uuid:{0}, name:{1}] attached to cluster[uuid:{2}] that has physical interface[{3}], vlan[{4}]. 
Failed to attach L2VlanNetwork[uuid:{5}] cannot\ find\ ip\ range\ that\ has\ ip[%s]\ in\ l3Network[uuid\:%s] = cannot find ip range that has ip[{0}] in l3Network[uuid:{1}] IP\ allocator\ strategy[%s]\ failed,\ because\ %s = IP allocator strategy[{0}] failed, because {1} +could\ not\ delete\ ip\ address,\ because\ it's\ used\ by\ vmnic[uuid\:%s] = could not delete ip address, because it''s used by vmnic[uuid:{0}] +could\ not\ reserve\ ip\ range,\ because\ start\ ip[%s]\ is\ not\ valid\ ip\ address = could not reserve ip range, because start ip[{0}] is not valid ip address +could\ not\ reserve\ ip\ range,\ because\ end\ ip[%s]\ is\ not\ valid\ ip\ address = could not reserve ip range, because end ip[{0}] is not valid ip address +could\ not\ reserve\ ip\ range,\ because\ end\ ip[%s]\ is\ not\ ipv4\ address = could not reserve ip range, because end ip[{0}] is not ipv4 address +could\ not\ reserve\ ip\ range,\ because\ end\ ip[%s]\ is\ not\ ipv6\ address = could not reserve ip range, because end ip[{0}] is not ipv6 address +could\ not\ reserve\ ip\ range,\ because\ end\ ip[%s]\ is\ less\ than\ start\ ip[%s] = could not reserve ip range, because end ip[{0}] is less than start ip[{1}] +could\ not\ reserve\ ip\ range,\ because\ there\ is\ no\ ipv4\ range = could not reserve ip range, because there is no ipv4 range +could\ not\ reserve\ ip\ range,\ because\ there\ is\ no\ ipv6\ range = could not reserve ip range, because there is no ipv6 range +could\ not\ reserve\ ip\ range,\ because\ reserve\ ip\ is\ not\ in\ ip\ range[%s] = could not reserve ip range, because reserve ip is not in ip range[{0}] +could\ not\ reserve\ ip\ range,\ because\ new\ range\ [%s\:%s]\ is\ overlapped\ with\ old\ range\ [%s\:%s] = could not reserve ip range, because new range [{0}:{1}] is overlapped with old range [{2}:{3}] +could\ not\ set\ mtu\ because\ l2\ network[uuid\:%s]\ of\ l3\ network\ [uuid\:%s]\ mtu\ can\ not\ be\ bigger\ than\ the\ novlan\ network = could not set mtu because l2 
network[uuid:{0}] of l3 network [uuid:{1}] mtu can not be bigger than the novlan network can\ not\ delete\ the\ last\ normal\ ip\ range\ because\ there\ is\ still\ has\ address\ pool = can not delete the last normal ip range because there is still has address pool you\ must\ update\ system\ and\ category\ both = you must update system and category both no\ ip\ range\ in\ l3[%s] = no ip range in l3[{0}] @@ -2756,6 +3213,7 @@ ipRangeUuids,\ L3NetworkUuids,\ zoneUuids\ must\ have\ at\ least\ one\ be\ none- all\ the\ specified\ L3\ networks\ are\ IPAM\ disabled,\ cannot\ get\ ip\ address\ capacity = all the specified L3 networks are IPAM disabled, cannot get ip address capacity unsupported\ l3network\ type[%s] = unsupported l3network type[{0}] %s\ is\ not\ a\ valid\ domain\ name = {0} is not a valid domain name +not\ valid\ combination\ of\ system\ and\ category,only\ %s\ are\ valid = not valid combination of system and category,only {0} are valid l3\ network\ [uuid\ %s\:\ name\ %s]\ is\ not\ a\ public\ network,\ address\ pool\ range\ can\ not\ be\ added = l3 network [uuid {0}: name {1}] is not a public network, address pool range can not be added the\ IP\ range[%s\ ~\ %s]\ contains\ D\ class\ addresses\ which\ are\ for\ multicast = the IP range[{0} ~ {1}] contains D class addresses which are for multicast the\ IP\ range[%s\ ~\ %s]\ contains\ E\ class\ addresses\ which\ are\ reserved = the IP range[{0} ~ {1}] contains E class addresses which are reserved @@ -2768,6 +3226,7 @@ gateway[%s]\ is\ not\ a\ IPv4\ address = gateway[{0}] is not a IPv4 address netmask[%s]\ is\ not\ a\ netmask,\ and\ the\ IP\ range\ netmask\ cannot\ be\ 0.0.0.0 = netmask[{0}] is not a netmask, and the IP range netmask cannot be 0.0.0.0 start\ ip[%s]\ is\ behind\ end\ ip[%s] = start ip[{0}] is behind end ip[{1}] overlap\ with\ ip\ range[uuid\:%s,\ start\ ip\:%s,\ end\ ip\:\ %s] = overlap with ip range[uuid:{0}, start ip:{1}, end ip: {2}] +multiple\ CIDR\ on\ the\ same\ L3\ network\ is\ not\ 
allowed.\ There\ has\ been\ a\ IP\ range[uuid\:%s,\ CIDR\:%s],\ the\ new\ IP\ range[CIDR\:%s]\ is\ not\ in\ the\ CIDR\ with\ the\ existing\ one = multiple CIDR on the same L3 network is not allowed. There has been a IP range[uuid:{0}, CIDR:{1}], the new IP range[CIDR:{2}] is not in the CIDR with the existing one the\ endip[%s]\ is\ not\ in\ the\ subnet\ %s/%s = the endip[{0}] is not in the subnet {1}/{2} gateway[%s]\ can\ not\ be\ part\ of\ range[%s,\ %s] = gateway[{0}] can not be part of range[{1}, {2}] new\ add\ ip\ range\ gateway\ %s\ is\ different\ from\ old\ gateway\ %s = new add ip range gateway {0} is different from old gateway {1} @@ -2807,20 +3266,29 @@ there\ has\ been\ a\ nfs\ primary\ storage\ having\ url\ as\ %s\ in\ zone[uuid\: found\ multiple\ CIDR = found multiple CIDR invalid\ CIDR\:\ %s = invalid CIDR: {0} IP\ address[%s]\ is\ not\ in\ CIDR[%s] = IP address[{0}] is not in CIDR[{1}] +there\ are\ %s\ running\ VMs\ on\ the\ NFS\ primary\ storage,\ please\ stop\ them\ and\ try\ again\:\\n%s\\n = there are {0} running VMs on the NFS primary storage, please stop them and try again:\\n{1}\\n cannot\ find\ usable\ backend = cannot find usable backend no\ usable\ backend\ found = no usable backend found +no\ host\ in\ Connected\ status\ to\ which\ nfs\ primary\ storage[uuid\:%s,\ name\:%s]\ attached\ found\ to\ revert\ volume[uuid\:%s]\ to\ snapshot[uuid\:%s,\ name\:%s] = no host in Connected status to which nfs primary storage[uuid:{0}, name:{1}] attached found to revert volume[uuid:{2}] to snapshot[uuid:{3}, name:{4}] +no\ host\ in\ Connected\ status\ to\ which\ nfs\ primary\ storage[uuid\:%s,\ name\:%s]\ attached\ found\ to\ revert\ volume[uuid\:%s]\ to\ image[uuid\:%s] = no host in Connected status to which nfs primary storage[uuid:{0}, name:{1}] attached found to revert volume[uuid:{2}] to image[uuid:{3}] vm[uuid\:%s]\ is\ not\ Running,\ Paused\ or\ Stopped,\ current\ state\ is\ %s = vm[uuid:{0}] is not Running, Paused or Stopped, current state is {1} 
primary\ storage[uuid\:%s]\ doesn't\ attach\ to\ any\ cluster = primary storage[uuid:{0}] doesn''t attach to any cluster -host\ where\ vm[uuid\:%s]\ locate\ is\ not\ Connected. = host where vm[uuid:{0}] locate is not Connected. +the\ NFS\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ usable\ host\ to\ create\ the\ data\ volume[uuid\:%s,\ name\:%s] = the NFS primary storage[uuid:{0}, name:{1}] cannot find any usable host to create the data volume[uuid:{2}, name:{3}] +the\ NFS\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = the NFS primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected the\ NFS\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ hosts\ in\ attached\ clusters\ to\ perform\ the\ operation = the NFS primary storage[uuid:{0}, name:{1}] cannot find hosts in attached clusters to perform the operation no\ host\ found\ for\ volume[uuid\:%s] = no host found for volume[uuid:{0}] +the\ NFS\ primary\ storage[uuid\:%s]\ is\ not\ attached\ to\ any\ clusters,\ and\ cannot\ expunge\ the\ root\ volume[uuid\:%s]\ of\ the\ VM[uuid\:%s] = the NFS primary storage[uuid:{0}] is not attached to any clusters, and cannot expunge the root volume[uuid:{1}] of the VM[uuid:{2}] cannot\ find\ a\ connected\ host\ in\ cluster\ which\ ps\ [uuid\:\ %s]\ attached = cannot find a connected host in cluster which ps [uuid: {0}] attached cannot\ find\ a\ Connected\ host\ to\ execute\ command\ for\ nfs\ primary\ storage[uuid\:%s] = cannot find a Connected host to execute command for nfs primary storage[uuid:{0}] +cannot\ find\ a\ host\ which\ has\ Connected\ host-NFS\ connection\ to\ execute\ command\ for\ nfs\ primary\ storage[uuid\:%s] = cannot find a host which has Connected host-NFS connection to execute command for nfs primary storage[uuid:{0}] +unable\ to\ attach\ a\ primary\ storage[uuid\:%s,\ name\:%s]\ to\ 
cluster[uuid\:%s].\ Kvm\ host\ in\ the\ cluster\ has\ qemu-img\ with\ version[%s];\ but\ the\ primary\ storage\ has\ attached\ to\ another\ cluster\ that\ has\ kvm\ host\ which\ has\ qemu-img\ with\ version[%s].\ qemu-img\ version\ greater\ than\ %s\ is\ incompatible\ with\ versions\ less\ than\ %s,\ this\ will\ causes\ volume\ snapshot\ operation\ to\ fail.\ Please\ avoid\ attaching\ a\ primary\ storage\ to\ clusters\ that\ have\ different\ Linux\ distributions,\ in\ order\ to\ prevent\ qemu-img\ version\ mismatch = unable to attach a primary storage[uuid:{0}, name:{1}] to cluster[uuid:{2}]. Kvm host in the cluster has qemu-img with version[{3}]; but the primary storage has attached to another cluster that has kvm host which has qemu-img with version[{4}]. qemu-img version greater than {5} is incompatible with versions less than {6}, this will causes volume snapshot operation to fail. Please avoid attaching a primary storage to clusters that have different Linux distributions, in order to prevent qemu-img version mismatch unable\ to\ create\ folder[installUrl\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = unable to create folder[installUrl:{0}] on kvm host[uuid:{1}, ip:{2}], because {3} no\ host\ in\ is\ Connected\ or\ primary\ storage[uuid\:%s]\ attach\ no\ cluster = no host in is Connected or primary storage[uuid:{0}] attach no cluster +failed\ to\ ping\ nfs\ primary\ storage[uuid\:%s]\ from\ host[uuid\:%s],because\ %s.\ disconnect\ this\ host-ps\ connection = failed to ping nfs primary storage[uuid:{0}] from host[uuid:{1}],because {2}. 
disconnect this host-ps connection The\ chosen\ host[uuid\:%s]\ to\ perform\ storage\ migration\ is\ lost = The chosen host[uuid:{0}] to perform storage migration is lost failed\ to\ check\ existence\ of\ %s\ on\ nfs\ primary\ storage[uuid\:%s],\ %s = failed to check existence of {0} on nfs primary storage[uuid:{1}], {2} unable\ to\ create\ empty\ volume[uuid\:%s,\ \ name\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = unable to create empty volume[uuid:{0}, name:{1}] on kvm host[uuid:{2}, ip:{3}], because {4} +failed\ to\ delete\ bits[%s]\ on\ nfs\ primary\ storage[uuid\:%s],\ %s,\ will\ clean\ up\ installPath,\ pinv.getUuid(),\ rsp.getError() = failed to delete bits[{0}] on nfs primary storage[uuid:{1}], {2}, will clean up installPath, pinv.getUuid(), rsp.getError() failed\ to\ revert\ volume[uuid\:%s]\ to\ snapshot[uuid\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ %s = failed to revert volume[uuid:{0}] to snapshot[uuid:{1}] on kvm host[uuid:{2}, ip:{3}], {4} failed\ to\ revert\ volume[uuid\:%s]\ to\ image[uuid\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ %s = failed to revert volume[uuid:{0}] to image[uuid:{1}] on kvm host[uuid:{2}, ip:{3}], {4} fails\ to\ create\ root\ volume[uuid\:%s]\ from\ cached\ image[path\:%s]\ because\ %s = fails to create root volume[uuid:{0}] from cached image[path:{1}] because {2} @@ -2853,6 +3321,7 @@ Export\ vm\ requires\ an\ ImageStore\ backup\ storage,\ but\ given\ backupStorag Not\ found\ the\ vm\ to\ be\ exported\ with\ the\ uuid\:\ %s = Not found the vm to be exported with the uuid: {0} Only\ vm\ in\ state\:\ %s\ can\ be\ exported. = Only vm in state: {0} can be exported. 
failed\ to\ parse\ jsonCreateVmParam\ in\ APICreateVmInstanceFromOvfMsg = failed to parse jsonCreateVmParam in APICreateVmInstanceFromOvfMsg +backup\ storage[uuid\:\ %s]\ does\ not\ have\ enough\ available\ capacity\ for\ exporting\ vm[uuid\:\ %s],\ required\ capacity\ is\:\ %d = backup storage[uuid: {0}] does not have enough available capacity for exporting vm[uuid: {1}], required capacity is: {2} failed\ to\ parse\ OVF\ XML\ string = failed to parse OVF XML string ova\ package[uuid\:\ %s]\ not\ found. = ova package[uuid: {0}] not found. Failed\ to\ read\ ovf\ file. = Failed to read ovf file. @@ -2863,6 +3332,7 @@ cancel\ create\ OVF\ VM\ process\ before\ creating\ VM = cancel create OVF VM pr failed\ to\ create\ VM\ from\ OVF\ because\ the\ root\ disk\ of\ the\ VM\ cannot\ be\ found = failed to create VM from OVF because the root disk of the VM cannot be found message\ can\ not\ be\ null = message can not be null ovfInfo\ can\ not\ be\ null = ovfInfo can not be null +failed\ to\ create\ ovf\ bundle\:\ Neither\ the\ OVF\ file\ nor\ the\ custom\ API\ has\ set\ the\ size\ of\ the\ root\ disk,\ so\ unable\ to\ allocate\ root\ disk.\ You\ should\ set\ root\ disk\ size\ in\ CreateVmInstanceFromOvfAction.jsonCreateVmParam.rootDiskSize = failed to create ovf bundle: Neither the OVF file nor the custom API has set the size of the root disk, so unable to allocate root disk. 
You should set root disk size in CreateVmInstanceFromOvfAction.jsonCreateVmParam.rootDiskSize failed\ to\ create\ ovf\ bundle = failed to create ovf bundle failed\ to\ validate\ ovf\ bundle = failed to validate ovf bundle ovfId\ is\ null = ovfId is null @@ -2903,6 +3373,8 @@ vip\ port\ range[vipStartPort\:%s,\ vipEndPort\:%s]\ overlaps\ with\ rule[uuid\: the\ VM[name\:%s\ uuid\:%s]\ already\ has\ port\ forwarding\ rules\ that\ have\ different\ VIPs\ than\ the\ one[uuid\:%s] = the VM[name:{0} uuid:{1}] already has port forwarding rules that have different VIPs than the one[uuid:{2}] the\ VmNic[uuid\:%s]\ already\ has\ port\ forwarding\ rules\ that\ have\ different\ VIPs\ than\ the\ one[uuid\:%s] = the VmNic[uuid:{0}] already has port forwarding rules that have different VIPs than the one[uuid:{1}] vmNic\ uuid[%s]\ is\ not\ allowed\ add\ portForwarding\ with\ allowedCidr\ rule,\ because\ vmNic\ exist\ eip = vmNic uuid[{0}] is not allowed add portForwarding with allowedCidr rule, because vmNic exist eip +could\ not\ attach\ port\ forwarding\ rule\ with\ allowedCidr,\ because\ vmNic[uuid\:%s]\ already\ has\ rules\ that\ overlap\ the\ target\ private\ port\ ranges[%s,\ %s]\ and\ have\ the\ same\ protocol\ type[%s] = could not attach port forwarding rule with allowedCidr, because vmNic[uuid:{0}] already has rules that overlap the target private port ranges[{1}, {2}] and have the same protocol type[{3}] +could\ not\ attach\ port\ forwarding\ rule,\ because\ vmNic[uuid\:%s]\ already\ has\ a\ rule\ that\ overlaps\ the\ target\ private\ port\ ranges[%s,\ %s],\ has\ the\ same\ protocol\ type[%s]\ and\ has\ AllowedCidr = could not attach port forwarding rule, because vmNic[uuid:{0}] already has a rule that overlaps the target private port ranges[{1}, {2}], has the same protocol type[{3}] and has AllowedCidr unable\ to\ create\ port\ forwarding\ rule,\ extension[%s]\ refused\ it\ because\ %s = unable to create port forwarding rule, extension[{0}] refused it because {1} port\ 
forwarding\ rule\ [uuid\:%s]\ is\ deleted = port forwarding rule [uuid:{0}] is deleted @@ -2927,6 +3399,7 @@ cannot\ find\ internal\ id\ of\ the\ session[uuid\:%s],\ are\ there\ too\ many\ # In Module: portal no\ service\ configuration\ file\ declares\ message\:\ %s = no service configuration file declares message: {0} management\ node[uuid\:%s]\ is\ not\ ready\ yet = management node[uuid:{0}] is not ready yet +resourceUuid[%s]\ is\ not\ a\ valid\ uuid.\ A\ valid\ uuid\ is\ a\ UUID(v4\ recommended)\ with\ '-'\ stripped.\ see\ http\://en.wikipedia.org/wiki/Universally_unique_identifier\ for\ format\ of\ UUID,\ the\ regular\ expression\ uses\ to\ validate\ a\ UUID\ is\ '[0-9a-f]{8}[0-9a-f]{4}[1-5][0-9a-f]{3}[89ab][0-9a-f]{3}[0-9a-f]{12}' = resourceUuid[{0}] is not a valid uuid. A valid uuid is a UUID(v4 recommended) with ''-'' stripped. see http://en.wikipedia.org/wiki/Universally_unique_identifier for format of UUID, the regular expression uses to validate a UUID is ''[0-9a-f]{8}[0-9a-f]{4}[1-5][0-9a-f]{3}[89ab][0-9a-f]{3}[0-9a-f]{12}'' invalid\ value[%s]\ of\ field[%s] = invalid value[{0}] of field[{1}] invalid\ field[%s]\ for\ %s,\ resource[uuid\:%s,\ type\:%s]\ not\ found = invalid field[{0}] for {1}, resource[uuid:{2}, type:{3}] not found invalid\ field[%s]\ for\ %s,\ resource[uuids\:%s,\ type\:%s]\ not\ found = invalid field[{0}] for {1}, resource[uuids:{2}, type:{3}] not found @@ -2935,12 +3408,14 @@ invalid\ value\ %s\ of\ field[%s] = invalid value {0} of field[{1}] # In Module: resourceconfig resources\ has\ inconsistent\ resourceTypes.\ Details\:\ %s = resources has inconsistent resourceTypes. 
Details: {0} cannot\ find\ resource[uuid\:\ %s] = cannot find resource[uuid: {0}] +ResourceConfig\ [category\:%s,\ name\:%s]\ cannot\ bind\ to\ resourceType\:\ %s = ResourceConfig [category:{0}, name:{1}] cannot bind to resourceType: {2} no\ global\ config[category\:%s,\ name\:%s]\ found = no global config[category:{0}, name:{1}] found global\ config[category\:%s,\ name\:%s]\ cannot\ bind\ resource = global config[category:{0}, name:{1}] cannot bind resource account\ has\ no\ access\ to\ the\ resource[uuid\:\ %s] = account has no access to the resource[uuid: {0}] # In Module: rest [%s]\ field\ is\ excepted\ an\ int\ or\ long,\ but\ was\ [%s]. = [{0}] field is excepted an int or long, but was [{1}]. +Invalid\ value\ for\ boolean\ field\ [%s],\ [%s]\ is\ not\ a\ valid\ boolean\ string[true,\ false]. = Invalid value for boolean field [{0}], [{1}] is not a valid boolean string[true, false]. # In Module: routeProtocol [%s]\ is\ not\ formatted\ as\ IPv4\ address = [{0}] is not formatted as IPv4 address @@ -2987,6 +3462,7 @@ failed\ to\ check\ physical\ interface\ for\ HardwareVxlanPool[uuid\:%s,\ name\: condition\ name[%s]\ is\ invalid,\ no\ such\ field\ on\ inventory\ class[%s] = condition name[{0}] is invalid, no such field on inventory class[{1}] condition\ name[%s]\ is\ invalid,\ field[%s]\ of\ inventory[%s]\ is\ annotated\ as\ @Unqueryable\ field = condition name[{0}] is invalid, field[{1}] of inventory[{2}] is annotated as @Unqueryable field entity\ meta\ class[%s]\ has\ no\ field[%s] = entity meta class[{0}] has no field[{1}] +field[%s]\ is\ not\ a\ primitive\ of\ the\ inventory\ %s;\ you\ cannot\ specify\ it\ in\ the\ parameter\ 'fields';valid\ fields\ are\ %s = field[{0}] is not a primitive of the inventory {1}; you cannot specify it in the parameter ''fields'';valid fields are {2} filterName\ must\ be\ formatted\ as\ [filterType\:condition(s)] = filterName must be formatted as [filterType:condition(s)] 'value'\ of\ query\ condition\ %s\ cannot\ be\ null = 
''value'' of query condition {0} cannot be null search\ module\ disabled = search module disabled @@ -3112,6 +3588,7 @@ security\ group[uuid\:%s]\ is\ not\ owned\ by\ account[uuid\:%s]\ or\ admin = se # In Module: sftpBackupStorage SftpBackupStorage\ doesn't\ support\ scheme[%s]\ in\ url[%s] = SftpBackupStorage doesn''t support scheme[{0}] in url[{1}] fail\ to\ cancel\ download\ image,\ because\ %s = fail to cancel download image, because {0} +the\ uuid\ of\ sftpBackupStorage\ agent\ changed[expected\:%s,\ actual\:%s],\ it's\ most\ likely\ the\ agent\ was\ manually\ restarted.\ Issue\ a\ reconnect\ to\ sync\ the\ status = the uuid of sftpBackupStorage agent changed[expected:{0}, actual:{1}], it''s most likely the agent was manually restarted. Issue a reconnect to sync the status sftp\ backup\ storage\ do\ not\ support\ calculate\ image\ hash = sftp backup storage do not support calculate image hash Please\ stop\ the\ vm\ before\ create\ volume\ template\ to\ sftp\ backup\ storage\ %s = Please stop the vm before create volume template to sftp backup storage {0} duplicate\ backup\ storage.\ There\ has\ been\ a\ sftp\ backup\ storage[hostname\:%s]\ existing = duplicate backup storage. 
There has been a sftp backup storage[hostname:{0}] existing @@ -3120,30 +3597,45 @@ check\ image\ metadata\ file\:\ %s\ failed = check image metadata file: {0} fail image\ metadata\ file\:\ %s\ is\ not\ exist = image metadata file: {0} is not exist # In Module: sharedMountPointPrimaryStorage +cannot\ find\ any\ connected\ host\ to\ perform\ the\ operation,\ it\ seems\ all\ KVM\ hosts\ in\ the\ clusters\ attached\ with\ the\ shared\ mount\ point\ storage[uuid\:%s]\ are\ disconnected = cannot find any connected host to perform the operation, it seems all KVM hosts in the clusters attached with the shared mount point storage[uuid:{0}] are disconnected vm[uuid\:%s]\ is\ not\ Running,\ Paused\ or\ Stopped,\ current\ state[%s] = vm[uuid:{0}] is not Running, Paused or Stopped, current state[{1}] hosts[uuid\:%s]\ have\ the\ same\ mount\ path,\ but\ actually\ mount\ different\ storage. = hosts[uuid:{0}] have the same mount path, but actually mount different storage. host[uuid\:%s]\ might\ mount\ storage\ which\ is\ different\ from\ SMP[uuid\:%s],\ please\ check\ it = host[uuid:{0}] might mount storage which is different from SMP[uuid:{1}], please check it +the\ shared\ mount\ point\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = the shared mount point primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for instantiating the volume +the\ SMP\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = the SMP primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected not\ supported\ operation = not supported operation +the\ SMP\ primary\ storage[uuid\:%s]\ is\ not\ attached\ to\ any\ clusters,\ and\ cannot\ expunge\ the\ root\ volume[uuid\:%s]\ of\ the\ VM[uuid\:%s] = the SMP primary storage[uuid:{0}] is not 
attached to any clusters, and cannot expunge the root volume[uuid:{1}] of the VM[uuid:{2}] cannot\ find\ a\ Connected\ host\ to\ execute\ command\ for\ smp\ primary\ storage[uuid\:%s] = cannot find a Connected host to execute command for smp primary storage[uuid:{0}] cannot\ find\ a\ host\ which\ has\ Connected\ host-SMP\ connection\ to\ execute\ command\ for\ smp\ primary\ storage[uuid\:%s] = cannot find a host which has Connected host-SMP connection to execute command for smp primary storage[uuid:{0}] # In Module: sharedblock sanlock\ says\ host\ %s\ is\ offline\ on\ %s = sanlock says host {0} is offline on {1} +can\ not\ find\ volume\ need\ to\ operate\ shared\ block\ group\ primary\ storage = can not find volume need to operate shared block group primary storage +KVM\ host\ which\ volume[uuid%s]\ attached\ disconnected\ with\ the\ shared\ block\ group\ storage[uuid\:%s] = KVM host which volume[uuid{0}] attached disconnected with the shared block group storage[uuid:{1}] +cannot\ find\ any\ connected\ host\ to\ perform\ the\ operation,\ it\ seems\ all\ KVM\ hosts\ in\ the\ clusters\ attached\ with\ the\ shared\ block\ group\ storage[uuid\:%s]\ are\ disconnected = cannot find any connected host to perform the operation, it seems all KVM hosts in the clusters attached with the shared block group storage[uuid:{0}] are disconnected templated\ vm[uuid\:\ %s]\ cannot\ be\ create\ from\ vm\ with\ scsi\ lun[uuids\:\ %s] = templated vm[uuid: {0}] cannot be create from vm with scsi lun[uuids: {1}] primary\ storage[uuid\:\ %s]\ has\ attached\ the\ scsi\ lun[wwid\:\ %s] = primary storage[uuid: {0}] has attached the scsi lun[wwid: {1}] +the\ vm[uuid\:\ %s]\ does\ not\ has\ additional\ qmp\ socket,\ it\ may\ because\ of\ the\ vm\ start\ without\ the\ global\ config[vm.additionalQmp]\ enabled,\ please\ make\ sure\ it\ enabled\ and\ reboot\ vm\ in\ zstack = the vm[uuid: {0}] does not has additional qmp socket, it may because of the vm start without the global 
config[vm.additionalQmp] enabled, please make sure it enabled and reboot vm in zstack must\ specify\ at\ least\ one\ disk\ when\ add\ shared\ block\ group\ primary\ storage = must specify at least one disk when add shared block group primary storage +shared\ block[uuid\:%s,\ diskUuid\:%s,\ description\:%s]\ already\ added\ to\ shared\ block\ group[uuid\:%s]in\ new\ shared\ block\ group = shared block[uuid:{0}, diskUuid:{1}, description:{2}] already added to shared block group[uuid:{3}]in new shared block group shared\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage\ can\ not\ resize = shared volume[uuid: {0}] on shared block group primary storage can not resize shared\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage\ has\ attached\ to\ not\ stopped\ vm\ instances[uuids\:\ %s] = shared volume[uuid: {0}] on shared block group primary storage has attached to not stopped vm instances[uuids: {1}] can\ not\ find\ the\ preparation\ of\ the\ volume[%s] = can not find the preparation of the volume[{0}] +use\ the\ thick\ provisioning\ volume\ as\ the\ cache\ volume.\ the\ preparation\ of\ the\ volume[%s]\ is\ %s = use the thick provisioning volume as the cache volume. 
the preparation of the volume[{0}] is {1} the\ scsi\ lun[uuid\:\ %s,\ wwid\:\ %s]\ is\ already\ attach\ to\ primary\ storage[uuid\:\ %s] = the scsi lun[uuid: {0}, wwid: {1}] is already attach to primary storage[uuid: {2}] can\ not\ found\ any\ cluster\ attached\ on\ shared\ block\ group\ primary\ storage[uuid\:\ %S] = can not found any cluster attached on shared block group primary storage[uuid: {0}] +the\ shared\ block\ group\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = the shared block group primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected failed\ to\ connect\ to\ all\ clusters%s = failed to connect to all clusters{0} +the\ SharedBlock\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = the SharedBlock primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected cannot\ find\ volume\ snapshot[uuid\:%s] = cannot find volume snapshot[uuid:{0}] empty\ migrateVolumeStructs\ in\ migrateVolumesBetweenSharedBlockGroupPrimaryStorageMsg! = empty migrateVolumeStructs in migrateVolumesBetweenSharedBlockGroupPrimaryStorageMsg! no\ volume\ in\ migrateVolumeStructs\ in\ migrateVolumesBetweenSharedBlockGroupPrimaryStorageMsg! = no volume in migrateVolumeStructs in migrateVolumesBetweenSharedBlockGroupPrimaryStorageMsg! 
cannot\ find\ an\ available\ host\ to\ execute\ command\ for\ shared\ block\ group\ primary\ storage[uuid\:%s] = cannot find an available host to execute command for shared block group primary storage[uuid:{0}] +cannot\ find\ a\ host\ which\ has\ connected\ shared\ block\ to\ execute\ command\ for\ shared\ block\ group\ primary\ storage[uuid\:%s] = cannot find a host which has connected shared block to execute command for shared block group primary storage[uuid:{0}] +the\ host[uuid\:\ %s]\ running\ on\ is\ not\ available\ to\ resize\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage[uuid\:\ %s] = the host[uuid: {0}] running on is not available to resize volume[uuid: {1}] on shared block group primary storage[uuid: {2}] primary\ storage[uuid\:%s]\ not\ found = primary storage[uuid:{0}] not found volume[uuid\:%s]\ not\ found = volume[uuid:{0}] not found +can\ not\ find\ qualified\ kvm\ host\ for\ shared\ block\ group\ primary\ storage[uuid\:\ %s] = can not find qualified kvm host for shared block group primary storage[uuid: {0}] shared\ volume\ not\ support\ thin\ provisioning = shared volume not support thin provisioning not\ support\ online\ merge\ snapshot\ for\ shareable\ volume[uuid\:\ %s]\ on\ sharedblock = not support online merge snapshot for shareable volume[uuid: {0}] on sharedblock the\ image[uuid\:\ %s,\ name\:%s]\ is\ not\ found\ on\ any\ backup\ storage = the image[uuid: {0}, name:{1}] is not found on any backup storage @@ -3153,17 +3645,26 @@ not\ support\ convert\ thin\ volume\ to\ thick\ volume\ yet = not support conver expected\ status\ is\ %s\ and\ current\ status = expected status is {0} and current status VM[uuid\:%s]\ has\ multiple\ ISOs\ from\ different\ primary\ storage\:\ %s = VM[uuid:{0}] has multiple ISOs from different primary storage: {1} QCow2\ shared\ volume[uuid\:%s]\ is\ not\ supported = QCow2 shared volume[uuid:{0}] is not supported +can\ not\ find\ any\ available\ host\ to\ take\ snapshot\ for\ volume[uuid\:\ %s]\ on\ 
shared\ block\ group\ primary\ storage[uuid\:\ %s] = can not find any available host to take snapshot for volume[uuid: {0}] on shared block group primary storage[uuid: {1}] only\ support\ full = only support full +can\ not\ find\ any\ available\ host\ to\ migrate\ volume[uuid\:\ %s]\ between\ shared\ block\ group\ primary\ storage[uuid\:\ %s]\ and\ [uuid\:\ %s] = can not find any available host to migrate volume[uuid: {0}] between shared block group primary storage[uuid: {1}] and [uuid: {2}] +can\ not\ find\ any\ available\ host\ to\ migrate\ for\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage[uuid\:\ %s]\ and\ [uuid\:\ %s] = can not find any available host to migrate for volume[uuid: {0}] on shared block group primary storage[uuid: {1}] and [uuid: {2}] +can\ not\ find\ hosts\ both\ connect\ to\ primary\ storage[uuid\:\ %s]\ and\ primary\ storage[uuid\:\ %s] = can not find hosts both connect to primary storage[uuid: {0}] and primary storage[uuid: {1}] +cannot\ find\ any\ connected\ host\ to\ perform\ the\ operation,\ it\ seems\ all\ KVM\ hosts\ attached\ with\ the\ shared\ block\ group\ storage[uuid\:%s]\ are\ disconnected = cannot find any connected host to perform the operation, it seems all KVM hosts attached with the shared block group storage[uuid:{0}] are disconnected cannot\ shrink\ snapshot\ %s,\ because\ volume\ %s\ not\ ready = cannot shrink snapshot {0}, because volume {1} not ready cannot\ shrink\ snapshot\ %s,\ beacuse\ vm\ %s\ not\ in\ Running/Stopped\ state = cannot shrink snapshot {0}, because vm {1} not in Running/Stopped state get\ null\ install\ path\ in\ snapshot\ for\ vm\ %s = get null install path in snapshot for vm {0} active\ children\ snapshot\ failed,\ because\ %s = active children snapshot failed, because {0} active\ installPath\ %s\ failed,\ because\ %s = active installPath {0} failed, because {1} +deactive\ installPath\ failed,\ because\ %s = deactive installPath failed, because {0} invalid\ 
thinProvisioningInitializeSize\ tag = invalid thinProvisioningInitializeSize tag invalid\ thinProvisioningInitializeSize\ tag,\ it\ must\ be\ greater\ than\ or\ equal\ to\ %s = invalid thinProvisioningInitializeSize tag, it must be greater than or equal to {0} invalid\ thinProvisioningInitializeSize,\ it\ is\ not\ a\ number = invalid thinProvisioningInitializeSize, it is not a number invalid\ thinProvisioningInitializeSize\ is\ larger\ than\ %d = invalid thinProvisioningInitializeSize is larger than {0} migrate\ volume\ without\ snapshot\ on\ shared\ block\ is\ not\ support\ to\ cancel. = migrate volume without snapshot on shared block is not support to cancel. +cannot\ find\ the\ image[uuid\:%s]\ in\ any\ connected\ backup\ storage\ attached\ to\ the\ zone[uuid\:%s].\ check\ below\:\\n1.\ whether\ the\ backup\ storage\ is\ attached\ to\ the\ zone[uuid\:%s]\\n2.\ whether\ the\ backup\ storage\ is\ in\ connected\ status;\ try\ to\ reconnect\ it\ if\ not = cannot find the image[uuid:{0}] in any connected backup storage attached to the zone[uuid:{1}]. check below:\\n1. whether the backup storage is attached to the zone[uuid:{2}]\\n2. 
whether the backup storage is in connected status; try to reconnect it if not +there\ are\ not\ enough\ capacity\ for\ image[uuid\:\ %s]\ download\ while\ volume[uuid\:\ %s]\ storage\ migration,\ required\ capacity\:\ %s,\ current\ available\ physical\ capacity\:\ %s = there are not enough capacity for image[uuid: {0}] download while volume[uuid: {1}] storage migration, required capacity: {2}, current available physical capacity: {3} +there\ are\ not\ enough\ capacity\ for\ volume[uuid\:\ %s]\ storage\ migration,\ required\ capacity\:\ %s,\ current\ available\ physical\ capacity\:\ %s = there are not enough capacity for volume[uuid: {0}] storage migration, required capacity: {1}, current available physical capacity: {2} data\ on\ source\ ps[uuid\:\ %s]\ has\ been\ discarded,\ not\ support\ rollback = data on source ps[uuid: {0}] has been discarded, not support rollback # In Module: simulator2 @@ -3175,14 +3676,43 @@ set\ to\ disconnected = set to disconnected on\ purpose = on purpose # In Module: slb +could\ not\ create\ slb\ instance\ because\ there\ is\ no\ load\ balancer\ slb\ group\ [uuid\:%s] = could not create slb instance because there is no load balancer slb group [uuid:{0}] +could\ not\ create\ slb\ instance\ because\ there\ is\ no\ slb\ offering\ configured\ for\ slb\ group\ [uuid\:%s] = could not create slb instance because there is no slb offering configured for slb group [uuid:{0}] +could\ not\ create\ slb\ instance\ because\ image\ uuid\ of\ slb\ offering\ [uuid\:%s]\ is\ null = could not create slb instance because image uuid of slb offering [uuid:{0}] is null +could\ not\ create\ slb\ instance\ because\ image\ [uuid\:%s]\ is\ deleted = could not create slb instance because image [uuid:{0}] is deleted could\ not\ create\ slb\ group\ because\ invalid\ front\ l3\ network\ type\ %s = could not create slb group because invalid front l3 network type {0} could\ not\ create\ slb\ group,\ because\ front\ network\ doesn't\ support\ ipv6\ yet = could not 
create slb group, because front network doesn''t support ipv6 yet +could\ not\ execute\ the\ api\ operation.\ front\ network\ [uuid\:%s]\ cidr\ [%s]\ is\ overlapped\ with\ management\ l3\ network[uuid\:%s]\ cidr\ [%s] = could not execute the api operation. front network [uuid:{0}] cidr [{1}] is overlapped with management l3 network[uuid:{2}] cidr [{3}] could\ not\ create\ slb\ group,\ because\ backend\ network\ doesn't\ support\ ipv6\ yet = could not create slb group, because backend network doesn''t support ipv6 yet +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ cidr\ [%s]\ is\ overlapped\ with\ frond\ l3\ network[uuid\:%s]\ cidr\ [%s] = could not execute the api operation. backend network [uuid:{0}] cidr [{1}] is overlapped with frond l3 network[uuid:{2}] cidr [{3}] +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ cidr\ [%s]\ is\ overlapped\ with\ management\ l3\ network[uuid\:%s]\ cidr\ [%s] = could not execute the api operation. backend network [uuid:{0}] cidr [{1}] is overlapped with management l3 network[uuid:{2}] cidr [{3}] +could\ not\ execute\ the\ api\ operation.\ frontend\ network\ [uuid\:%s]\ is\ not\ connected\ vpc\ router = could not execute the api operation. frontend network [uuid:{0}] is not connected vpc router +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ must\ be\ vpc\ network\ because\ frond\ l3\ network\ is\ vpc\ network = could not execute the api operation. backend network [uuid:{0}] must be vpc network because frond l3 network is vpc network +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ is\ not\ connected\ vpc\ router = could not execute the api operation. backend network [uuid:{0}] is not connected vpc router +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ is\ connected\ vpc\ router\ [uuid\:%s]\ while\ front\ network\ is\ connected\ to\ vpc\ router[uuid\:%s] = could not execute the api operation. 
backend network [uuid:{0}] is connected vpc router [uuid:{1}] while front network is connected to vpc router[uuid:{2}] +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ must\ be\ private\ flat\ network\ because\ frond\ l3\ network\ is\ private\ flat\ network = could not execute the api operation. backend network [uuid:{0}] must be private flat network because frond l3 network is private flat network +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ is\ connected\ vpc\ router\ [uuid\:%s]\ which\ is\ not\ connect\ to\ front\ network[uuid\:%s] = could not execute the api operation. backend network [uuid:{0}] is connected vpc router [uuid:{1}] which is not connect to front network[uuid:{2}] +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ must\ be\ vpc\ network\ because\ other\ backend\ network\ is\ vpc\ network = could not execute the api operation. backend network [uuid:{0}] must be vpc network because other backend network is vpc network +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ is\ connected\ vpc\ router\ [uuid\:%s]\ while\ other\ backend\ network\ is\ connected\ to\ vpc\ router[uuid\:%s] = could not execute the api operation. backend network [uuid:{0}] is connected vpc router [uuid:{1}] while other backend network is connected to vpc router[uuid:{2}] +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ can\ not\ be\ vpc\ network\ because\ other\ backend\ network\ is\ not\ vpc\ network = could not execute the api operation. 
backend network [uuid:{0}] can not be vpc network because other backend network is not vpc network could\ not\ create\ slb\ group\ because\ invalid\ deploy\ type\ %s = could not create slb group because invalid deploy type {0} could\ not\ create\ slb\ group\ because\ invalid\ backend\ type\ %s = could not create slb group because invalid backend type {0} +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ipv4\ address[%s]\ format\ error = can not attach l3 network [uuid:{0}] to SLB instance, because ipv4 address[{1}] format error +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ipv4\ netmask[%s]\ format\ error = can not attach l3 network [uuid:{0}] to SLB instance, because ipv4 netmask[{1}] format error +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ip\ address\ and\ netmask\ must\ be\ set\ in\ systemTag = can not attach l3 network [uuid:{0}] to SLB instance, because ip address and netmask must be set in systemTag +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ipv6\ address[%s]\ format\ error = can not attach l3 network [uuid:{0}] to SLB instance, because ipv6 address[{1}] format error +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ipv6\ prefix[%s]\ format\ error = can not attach l3 network [uuid:{0}] to SLB instance, because ipv6 prefix[{1}] format error +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ip\ address\ and\ prefix\ must\ be\ set\ in\ systemTag = can not attach l3 network [uuid:{0}] to SLB instance, because ip address and prefix must be set in systemTag can\ not\ detach\ front\ end\ l3\ network\ [uuid\:%s]\ from\ SLB\ instance = can not detach front end l3 network [uuid:{0}] from SLB instance can\ not\ detach\ management\ l3\ network\ [uuid\:%s]\ from\ SLB\ instance = can not detach management l3 network [uuid:{0}] from SLB instance +can\ not\ detach\ nic\ [uuid\:%s]\ from\ SLB\ instance,\ because\ it\ is\ 
the\ last\ backend\ l3\ network\ nic = can not detach nic [uuid:{0}] from SLB instance, because it is the last backend l3 network nic +can\ not\ create\ load\ balancer\ because\ vip\ [uuid\:%s]\ has\ attached\ other\ network\ service\ [%s] = can not create load balancer because vip [uuid:{0}] has attached other network service [{1}] +can\ not\ create\ load\ balancer\ because\ vip\ [uuid\:%s]\ has\ attached\ to\ vpc\ router\ [%s] = can not create load balancer because vip [uuid:{0}] has attached to vpc router [{1}] can\ not\ create\ load\ balancer\ because\ invalid\ slb\ group\ [uuid\:%s] = can not create load balancer because invalid slb group [uuid:{0}] +could\ not\ add\ vmnic\ to\ load\ balancer\ server\ \ group\ because\ l3\ network\ [uuid\:%s]\ is\ connected\ any\ vpc\ router = could not add vmnic to load balancer server group because l3 network [uuid:{0}] is connected any vpc router +could\ not\ add\ vmnic\ to\ load\ balancer\ server\ \ group\ because\ l3\ network[uuid\:%s]\ is\ connected\ to\ different\ vpc\ router = could not add vmnic to load balancer server group because l3 network[uuid:{0}] is connected to different vpc router +could\ not\ add\ vmnic\ to\ load\ balancer\ server\ \ group\ because\ l3\ network\ is\ not\ connected\ slb\ instance = could not add vmnic to load balancer server group because l3 network is not connected slb instance +can\ not\ find\ nic\ of\ slb\ instance\ [uuid\:%s]\ which\ is\ attached\ to\ slb\ group\ front\ l3\ network\ [uuid\:%s] = can not find nic of slb instance [uuid:{0}] which is attached to slb group front l3 network [uuid:{1}] failed\ to\ create\ vip%s\ on\ virtual\ router[uuid\:%s],\ because\ %s = failed to create vip{0} on virtual router[uuid:{1}], because {2} can\ not\ find\ slb\ vm\ instance = can not find slb vm instance @@ -3191,6 +3721,12 @@ Failed\ to\ create\ SNMP\ agent,\ because\ snmp\ agent\ already\ created. = Fail Failed\ to\ stop\ SNMP\ agent,\ please\ create\ a\ snmp\ agent\ first. 
= Failed to stop SNMP agent, please create a snmp agent first. Failed\ to\ update\ SNMP\ agent,\ please\ create\ a\ snmp\ agent\ first. = Failed to update SNMP agent, please create a snmp agent first. Failed\ to\ start\ SNMP\ agent,\ please\ create\ a\ snmp\ agent\ first. = Failed to start SNMP agent, please create a snmp agent first. +Failed\ to\ %s\ SNMP\ agent,\ because\ readCommunity\ can\ not\ be\ empty\ when\ version\ is\ v2c = Failed to {0} SNMP agent, because readCommunity can not be empty when version is v2c +Failed\ to\ %s\ SNMP\ agent,\ because\ userName\ can\ not\ be\ empty\ when\ version\ is\ v3 = Failed to {0} SNMP agent, because userName can not be empty when version is v3 +Failed\ to\ %s\ SNMP\ agent,\ auth\ algorithm\ can\ not\ be\ null\ when\ password\ is\ not\ null. = Failed to {0} SNMP agent, auth algorithm can not be null when password is not null. +Failed\ to\ %s\ SNMP\ agent,\ because\ auth\ password\ can\ not\ be\ empty. = Failed to {0} SNMP agent, because auth password can not be empty. +Failed\ to\ %s\ SNMP\ agent,\ because\ setting\ data\ encryption\ requires\ setting\ user\ verification\ first. = Failed to {0} SNMP agent, because setting data encryption requires setting user verification first. +Failed\ to\ %s\ SNMP\ agent,\ because\ privacy\ password\ can\ not\ be\ empty. = Failed to {0} SNMP agent, because privacy password can not be empty. can't\ get\ SnmpAgentImpl\ instance,\ due\ to\ no\ SnmpAgentVO\ exist. = can''t get SnmpAgentImpl instance, due to no SnmpAgentVO exist. more\ than\ one\ SnmpAgentVO\ exist. = more than one SnmpAgentVO exist. 
failed\ to\ start\ snmp\ agent[%s]\ on\ port\ %s,\ due\ to\ %s = failed to start snmp agent[{0}] on port {1}, due to {2} @@ -3198,6 +3734,11 @@ snmp[uuid\:%s]\ has\ not\ been\ created = snmp[uuid:{0}] has not been created failed\ to\ change\ snmp\ agent\ port\ from\ %s\ to\ %s,\ duet\ to\ %s = failed to change snmp agent port from {0} to {1}, due to {2} failed\ to\ close\ snmp\ agent\ session[%s]\ on\ port\ %s,\ due\ to\ %s = failed to close snmp agent session[{0}] on port {1}, due to {2} +# In Module: sns-aliyun-sms +Aliyun\ account[uuid\:%s]\ not\ exists = Aliyun account[uuid:{0}] not exists +invalid\ phone\ number[%s],\ sms\ number\ is\ like\ +86-18654321234 = invalid phone number[{0}], sms number is like +86-18654321234 +Aliyun\ sms\ event\ text\ template\ not\ found. = Aliyun sms event text template not found. + # In Module: sns uuid\ [%s]\ already\ exists = uuid [{0}] already exists smtpServer\ cannot\ null = smtpServer cannot null @@ -3210,7 +3751,6 @@ can\ not\ create\ snmp\ platform\ with\ same\ address[%s\:%s] = can not create s can\ not\ add\ same\ email\ address\ to\ endpoint[uuid\:%s] = can not add same email address to endpoint[uuid:{0}] cannot\ update\ email\ address\ to\ %s,\ which\ is\ already\ exists\ in\ endpoint[uuid\:%s] = cannot update email address to {0}, which already exists in endpoint[uuid:{1}] phone\ number\ [%s]\ already\ exists = phone number [{0}] already exists -invalid\ phone\ number[%s],\ sms\ number\ is\ like\ +86-18654321234 = invalid phone number[{0}], sms number is like +86-18654321234 invalid\ url[%s] = invalid url[{0}] [%s]\ is\ not\ a\ legal\ ip = [{0}] is not a legal ip invalid\ phone\ number[%s],\ the\ DingDing\ phone\ number\ is\ like\ +86-12388889999 = invalid phone number[{0}], the DingDing phone number is like +86-12388889999 @@ -3240,6 +3780,24 @@ only\ HTTP\ endpoint\ can\ subscribe\ API\ topic,\ the\ endpoint[type\:%s]\ is\ API\ topic\ cannot\ be\ deleted = API topic cannot be deleted system\ alarm\ topic\ 
cannot\ be\ deleted = system alarm topic cannot be deleted +# In Module: software-package-plugin +shell\ command\ failed = shell command failed +path\ cannot\ be\ null\ or\ empty = path cannot be null or empty +invalid\ path\:\ %s,\ %s = invalid path: {0}, {1} +invalid\ path\:\ %s = invalid path: {0} +filesystem\ stat\ failed = filesystem stat failed +invalid\ df\ output = invalid df output +invalid\ number\ format = invalid number format +Invalid\ install\ path\ detected\:\ %s.\ Paths\ must\ only\ contain\ letters,\ numbers,\ underscores,\ dashes,\ colons,\ spaces,\ dots\ and\ slashes.\ Path\ traversal\ sequences\ (..\ and\ //)\ are\ not\ allowed.\ Path\ must\ be\ absolute.\ Path\ must\ not\ be\ root\ path = Invalid install path detected: {0}. Paths must only contain letters, numbers, underscores, dashes, colons, spaces, dots and slashes. Path traversal sequences (.. and //) are not allowed. Path must be absolute. Path must not be root path +software\ package\ [%s]\ cannot\ be\ installed\ in\ current\ state\ [%s].\ Allowed\ states\:\ %s\ or\ %s. = software package [{0}] cannot be installed in current state [{1}]. Allowed states: {2} or {3}. +software\ package\ [%s]\ cannot\ be\ uninstalled\ in\ current\ state\ [%s].\ Allowed\ states\:\ %s. = software package [{0}] cannot be uninstalled in current state [{1}]. Allowed states: {2}. +failed\ to\ identify\ software\ package\ type.\ package\:\ %s,\ installPath\:\ %s,\ unzipPath\:\ %s.\ please\ verify\ the\ package\ format\ is\ correct\ and\ a\ corresponding\ extension\ point\ is\ registered. = failed to identify software package type. package: {0}, installPath: {1}, unzipPath: {2}. please verify the package format is correct and a corresponding extension point is registered. 
+a\ non-management\ node\ installation\ of\ the\ software\ package\ is\ detected\ in\ this\ environment.\ to\ proceed\ with\ a\ new\ management\ node-based\ installation,\ please\ first\:\\n1.\ uninstall\ the\ existing\ manually\ installed\ components\\n2.\ ensure\ the\ environment\ is\ completely\ clean\\nnote\:\ this\ installation\ must\ be\ performed\ exclusively\ through\ the\ management\ node = a non-management node installation of the software package is detected in this environment. to proceed with a new management node-based installation, please first:\\n1. uninstall the existing manually installed components\\n2. ensure the environment is completely clean\\nnote: this installation must be performed exclusively through the management node +no\ extension\ point\ found\ for\ software\ package\ type\:\ %s = no extension point found for software package type: {0} +software\ package\ [uuid\:%s]\ not\ found = software package [uuid:{0}] not found +failed\ to\ get\ software\ package\ type = failed to get software package type +upload\ software\ package\ session\ expired = upload software package session expired + # In Module: sshKeyPair The\ sshKeyPair\ already\ upload = The sshKeyPair already upload The\ sshKeyPair[uuid\:%s]\ was\ in\ using. = The sshKeyPair[uuid:{0}] was in using. 
@@ -3253,6 +3811,69 @@ Cannot\ generate\ sshKeyPair,\ error\:\ %s = Cannot generate sshKeyPair, error: failed\ to\ load\ the\ public\ key\:\ %s,\ err\:\ %s = failed to load the public key: {0}, err: {1} ssh\ key\ pair[uuid\:%s]\ can\ not\ associated\ to\ vm[uuid\:%s]\ due\ to\ the\ key\ not\ found = ssh key pair[uuid:{0}] can not associated to vm[uuid:{1}] due to the key not found +# In Module: sso-plugin +SSO\ client\ type[%s]\ not\ support\ for\ DeleteSSOClientAction = SSO client type[{0}] not support for DeleteSSOClientAction +casClient[uuid\:%s,\ name\:%s]\ has\ been\ deleted = casClient[uuid:{0}, name:{1}] has been deleted +unable\ to\ find\ CAS\ client[uuid\=%s] = unable to find CAS client[uuid={0}] +duplicate\ CAS\ server[serverName\=%s] = duplicate CAS server[serverName={0}] +url\ is\ error,\ clientUuid\ is\ miss = url is error, clientUuid is miss +\ missing\ cas\ client,\ please\ create\ cas\ client\ before\ sso = missing cas client, please create cas client before sso +failed\ to\ find\ account\ for\ CAS\ user[name\=%s] = failed to find account for CAS user[name={0}] +multiple\ accounts\ found\ for\ CAS\ user[name\=%s] = multiple accounts found for CAS user[name={0}] +oAuth2Client[uuid\:%s,\ name\:%s]\ has\ been\ deleted = oAuth2Client[uuid:{0}, name:{1}] has been deleted +redirectUrl\ is\ error,\ %s = redirectUrl is error, {0} +unable\ to\ find\ OAuth2\ client[uuid\=%s] = unable to find OAuth2 client[uuid={0}] +duplicate\ oauth2\ server[authorizationUrl\=%s] = duplicate oauth2 server[authorizationUrl={0}] +multiple\ accounts\ found\ for\ OAuth2\ user[sub\=%s] = multiple accounts found for OAuth2 user[sub={0}] +failed\ to\ find\ account\ for\ OAuth2\ user[name\=%s],\ maybe\ logging\ in\ for\ the\ first\ time = failed to find account for OAuth2 user[name={0}], maybe logging in for the first time +multiple\ accounts\ found\ for\ OAuth2\ user[AccountVO.username\=%s] = multiple accounts found for OAuth2 user[AccountVO.username={0}] +there\ was\ an\ error,\ 
reason\:\ \ token\ response\ is\ null = there was an error, reason: token response is null +there\ was\ an\ error,\ reason\:\ \ %s\ is\ null = there was an error, reason: {0} is null +response\ has\ error\ \:\ %s = response has error : {0} +failed\ to\ send\ response\ to\ oauth\ server = failed to send response to oauth server +get\ code\ response\ has\ error\ \:\ %s = get code response has error : {0} +error\ requesting\ token\ in\ clientUuid[%s],\ reason\:\ %s = error requesting token in clientUuid[{0}], reason: {1} +failed\ to\ post\ %s\ with\ unexpected\ status\ code\ %s = failed to post {0} with unexpected status code {1} +failed\ to\ post\ %s\ with\ IO\ error = failed to post {0} with IO error +unable\ to\ find\ oAuth2Token[userUuid\=%s] = unable to find oAuth2Token[userUuid={0}] + +# In Module: storage-device +scsi\ lun[uuid\:\ %s]\ and\ [uuid\:\ %s]\ does\ not\ has\ a\ common\ host = scsi lun[uuid: {0}] and [uuid: {1}] does not has a common host +scsi\ lun[uuid\:\ %s]\ is\ in\ disabled\ state = scsi lun[uuid: {0}] is in disabled state +the\ specific\ SCSI\ lun\ required = the specific SCSI lun required +do\ not\ support\ migration\ of\ vm[uuid\:%s]\ with\ shared\ block = do not support migration of vm[uuid:{0}] with shared block +NVMe\ server[ip\:\ %s,\ port\:\ %s,\ transport\:\ %s]\ already\ exists = NVMe server[ip: {0}, port: {1}, transport: {2}] already exists +NVMe\ server\ ip\:\ %s\ is\ not\ valid = NVMe server ip: {0} is not valid +NVMe\ server[uuid\:\ %s]\ already\ attached\ to\ cluster[uuid\:\ %s] = NVMe server[uuid: {0}] already attached to cluster[uuid: {1}] +iSCSI\ server[ip\:\ %s,\ port\:\ %s]\ already\ exists = iSCSI server[ip: {0}, port: {1}] already exists +iSCSI\ server\ ip\:\ %s\ is\ not\ valid = iSCSI server ip: {0} is not valid +iSCSI\ server[uuid\:\ %s]\ already\ attached\ to\ cluster[uuid\:\ %s] = iSCSI server[uuid: {0}] already attached to cluster[uuid: {1}] +iSCSI\ server[uuid\:\ %s]\ not\ attached\ to\ cluster[uuid\:\ %s] = iSCSI 
server[uuid: {0}] not attached to cluster[uuid: {1}] +iSCSI\ server[uuid\:\ %s]\ still\ attached\ to\ cluster[uuid\:\ %s] = iSCSI server[uuid: {0}] still attached to cluster[uuid: {1}] +scsi\ lun[wwid\:\ %s]\ has\ been\ attached\ to\ vm\ instance\ %s = scsi lun[wwid: {0}] has been attached to vm instance {1} +scisLun[uuids\:%s]\ are\ not\ attach\ to\ the\ cluster\ of\ host[uuid\:%s] = scisLun[uuids:{0}] are not attach to the cluster of host[uuid:{1}] +please\ umount\ all\ block\ devices\ of\ the\ vm[%s]\ and\ try\ again = please umount all block devices of the vm[{0}] and try again +hba\ scan\ is\ error\:\ %s = hba scan is error: {0} +scsi\ lun[wwid\:%s]\ has\ been\ attached\ into\ the\ vm[%s] = scsi lun[wwid:{0}] has been attached into the vm[{1}] +vm\ instance[%s]\ state[%s]\ not\ in\ allowed\ state[%s]\ for\ operation = vm instance[{0}] state[{1}] not in allowed state[{2}] for operation +vm\ instance[%s]\ host[uuid\:\ %s]\ not\ attached\ scsi\ lun[uuid\:\ %s] = vm instance[{0}] host[uuid: {1}] not attached scsi lun[uuid: {2}] +different\ nvme\ targets\ were\ found\ on\ host[%s]\ and\ host[%s] = different nvme targets were found on host[{0}] and host[{1}] +SCSI\ LUN[%s]\ is\ attached\ to\ VM\ [%s] = SCSI LUN[{0}] is attached to VM [{1}] +SCSI\ LUN[%s]\ record\ not\ found\ on\ host\ [%s] = SCSI LUN[{0}] record not found on host [{1}] +unexpected\ hypervisor\ type[%s]\ for\ host\ [%s] = unexpected hypervisor type[{0}] for host [{1}] +different\ iscsi\ configuration\ were\ found\ on\ host[uuid\:%s,\ targets\:%s]and\ host[uuid\:%s,\ targets\:%s] = different iscsi configuration were found on host[uuid:{0}, targets:{1}]and host[uuid:{2}, targets:{3}] +different\ disk\ types\ are\ found\ in\ different\ hosts\ for\ lun[serial\:%s],\ unable\ to\ attach\ it\ to\ cluster = different disk types are found in different hosts for lun[serial:{0}], unable to attach it to cluster +specified\ scsi\ lun[wwid\:\ %s]\ not\ exists\ or\ disabled = specified scsi lun[wwid: {0}] not 
exists or disabled +scsi[%s]\ lun[wwid\:%s]\ has\ been\ attached\ into\ the\ vm[%s] = scsi[{0}] lun[wwid:{1}] has been attached into the vm[{2}] +vm\ instance[%s]\ state\ [%s]\ not\ in\ allowed\ state[%s]\ for\ operation = vm instance[{0}] state [{1}] not in allowed state[{2}] for operation +vm\ instance[uuid\:\ %s]\ host[uuid\:\ %s]\ not\ attached\ scsi\ lun[uuid\:\ %s] = vm instance[uuid: {0}] host[uuid: {1}] not attached scsi lun[uuid: {2}] + +# In Module: storage-ha-plugin +not\ found\ hostId\ for\ hostUuid[%s]\ and\ primaryStorageUuid[%s] = not found hostId for hostUuid[{0}] and primaryStorageUuid[{1}] +host\ %s's\ heartbeat\ is\ not\ updated = host {0}''s heartbeat is not updated +host[uuid\:%s]'s\ heartbeat\ is\ not\ updated = host[uuid:{0}]''s heartbeat is not updated +shareblock\ says\ host\ %s\ is\ offline\ on\ %s = shareblock says host {0} is offline on {1} + # In Module: storage null\ installPath\ returned\ from\ driver\:\ %s = null installPath returned from driver: {0} %s\:\ health\ state\:\ %s = {0}: health state: {1} @@ -3262,6 +3883,8 @@ no\ backup\ storage\ type\ specified\ support\ to\ primary\ storage[uuid\:%s] = root\ image\ and\ root\ image\ cache\ has\ been\ deleted,\ cannot\ reimage\ now = root image and root image cache has been deleted, cannot reimage now storage\ is\ not\ healthy\:%s = storage is not healthy:{0} No\ primary\ storage\ plugin\ registered\ with\ identity\:\ %s = No primary storage plugin registered with identity: {0} +not\ support\ protocol[%s]\ on\ type[%s]\ primary\ storage = not support protocol[{0}] on type[{1}] primary storage +not\ support\ take\ volumes\ snapshots\ on\ multiple\ ps\ when\ including\ storage\ snapshot = not support take volumes snapshots on multiple ps when including storage snapshot cannot\ find\ ExternalPrimaryStorage[uuid\:%s] = cannot find ExternalPrimaryStorage[uuid:{0}] cannot\ connect\ any\ external\ storage = cannot connect any external storage %s\ should\ not\ be\ null = {0} should not be null 
@@ -3271,6 +3894,8 @@ backup\ storage[uuid\:%s]\ has\ been\ attached\ to\ zone[uuid\:%s] = backup stor failed\ to\ get\ header\ of\ image\ url\ %s\:\ %s = failed to get header of image url {0}: {1} failed\ to\ get\ header\ of\ image\ url\ %s = failed to get header of image url {0} the\ backup\ storage[uuid\:%s,\ name\:%s]\ has\ not\ enough\ capacity\ to\ download\ the\ image[%s].\ Required\ size\:%s,\ available\ size\:%s = the backup storage[uuid:{0}, name:{1}] has not enough capacity to download the image[{2}]. Required size:{3}, available size:{4} +the\ image\ size\ get\ from\ url\ %s\ is\ %d\ bytes,\ it's\ too\ small\ for\ an\ image,\ please\ check\ the\ url\ again. = the image size get from url {0} is {1} bytes, it''s too small for an image, please check the url again. +the\ backup\ storage[uuid\:%s,\ name\:%s]\ has\ not\ enough\ capacity\ to\ download\ the\ image[%s].Required\ size\:%s,\ available\ size\:%s = the backup storage[uuid:{0}, name:{1}] has not enough capacity to download the image[{2}].Required size:{3}, available size:{4} backup\ storage\ cannot\ proceed\ message[%s]\ because\ its\ status\ is\ %s = backup storage cannot proceed message[{0}] because its status is {1} backup\ storage\ cannot\ proceed\ message[%s]\ because\ its\ state\ is\ %s = backup storage cannot proceed message[{0}] because its state is {1} cannot\ reserve\ %s\ on\ the\ backup\ storage[uuid\:%s],\ it\ only\ has\ %s\ available = cannot reserve {0} on the backup storage[uuid:{1}], it only has {2} available @@ -3279,6 +3904,7 @@ only\ one\ backup\ storage\ data\ network\ system\ tag\ is\ allowed,\ but\ %s\ g required\ primary\ storage[uuid\:%s,\ type\:%s]\ could\ not\ support\ any\ backup\ storage. = required primary storage[uuid:{0}, type:{1}] could not support any backup storage. 
after\ subtracting\ reserved\ capacity,\ no\ backup\ storage\ has\ required\ capacity[%s\ bytes] = after subtracting reserved capacity, no backup storage has required capacity[{0} bytes] unable\ to\ allocate\ a\ backup\ storage = unable to allocate a backup storage +outputProtocol[%s]\ is\ exist\ on\ primary\ storage[%s]no\ need\ to\ add\ again = outputProtocol[{0}] already exists on primary storage[{1}], no need to add again unknown\ primary\ storage\ type[%s] = unknown primary storage type[{0}] zoneUuids,\ clusterUuids,\ primaryStorageUuids\ must\ have\ at\ least\ one\ be\ none-empty\ list,\ or\ all\ is\ set\ to\ true = zoneUuids, clusterUuids, primaryStorageUuids must have at least one be none-empty list, or all is set to true primary\ storage[uuid\:%s]\ has\ not\ been\ attached\ to\ cluster[uuid\:%s]\ yet = primary storage[uuid:{0}] has not been attached to cluster[uuid:{1}] yet @@ -3286,11 +3912,17 @@ primary\ storage[uuid\:%s]\ has\ been\ attached\ to\ cluster[uuid\:%s] = primary
primary\ storage[uuid\:%s]\ is\ not\ Connected = primary storage[uuid:{0}] is not Connected backup\ storage[uuid\:%s]\ is\ not\ attached\ to\ zone[uuid\:%s]\ the\ primary\ storage[uuid\:%s]\ belongs\ to = backup storage[uuid:{0}] is not attached to zone[uuid:{1}] the primary storage[uuid:{2}] belongs to volume[uuid\:%s]\ has\ been\ attached\ a\ %s\ VM.\ VM\ should\ be\ Stopped. = volume[uuid:{0}] has been attached a {1} VM. VM should be Stopped. +primary\ storage[uuid\:%s]\ cannot\ be\ deleted\ for\ still\ being\ attached\ to\ cluster[uuid\:%s]. = primary storage[uuid:{0}] cannot be deleted for still being attached to cluster[uuid:{1}]. cannot\ attach\ volume[uuid\:%s]\ whose\ primary\ storage\ is\ Maintenance = cannot attach volume[uuid:{0}] whose primary storage is Maintenance +cannot\ reserve\ %s\ bytes\ on\ the\ primary\ storage[uuid\:%s],\ it's\ short\ of\ available\ capacity = cannot reserve {0} bytes on the primary storage[uuid:{1}], it''s short of available capacity +the\ primary\ storage[uuid\:%s]\ is\ not\ in\ status\ of\ Connected,\ current\ status\ is\ %s = the primary storage[uuid:{0}] is not in status of Connected, current status is {1} PrimaryStorageFeatureAllocatorFlow[%s]\ returns\ zero\ primary\ storage\ candidate = PrimaryStorageFeatureAllocatorFlow[{0}] returns zero primary storage candidate +cannot\ find\ primary\ storage\ satisfying\ conditions[connected\ to\ host\:%s,\ state\:%s,\ status\:\ %s,\ available\ capacity\ >\ %s = cannot find primary storage satisfying conditions[connected to host:{0}, state:{1}, status: {2}, available capacity > {3} %s\ is\ invalid.\ %s\ is\ not\ a\ valid\ zstack\ uuid = {0} is invalid. 
{1} is not a valid zstack uuid no\ primary\ storage[uuid\:%s]\ found = no primary storage[uuid:{0}] found primaryStorage[uuid\=%s]\ does\ not\ exist = primaryStorage[uuid={0}] does not exist @@ -3307,6 +3939,9 @@ cannot\ find\ primary\ storage[uuid\:%s],\ the\ uuid\ is\ specified\ in\ instanc cannot\ find\ primary\ storage\ having\ user\ tag[%s].\ The\ user\ tag\ is\ specified\ in\ instance\ offering\ or\ disk\ offering = cannot find primary storage having user tag[{0}]. The user tag is specified in instance offering or disk offering PrimaryStorageTagAllocatorExtensionPoint[%s]\ returns\ zero\ primary\ storage\ candidate = PrimaryStorageTagAllocatorExtensionPoint[{0}] returns zero primary storage candidate failed\ to\ cancel\ deletion\ job.\ Volume[uuid\:%s]\ not\ exists. = failed to cancel deletion job. Volume[uuid:{0}] not exists. +failed\ to\ cancel\ deletion\ job.\ Volume[uuid\:%s]\ not\ attached\ to\ any\ vm,\ offline\ snapshot\ deletion\ do\ not\ support\ cancel. = failed to cancel deletion job. Volume[uuid:{0}] not attached to any vm, offline snapshot deletion do not support cancel. +failed\ to\ cancel\ deletion\ job.\ Volume[uuid\:%s]\ attached\ vm\ not\ exists,\ offline\ snapshot\ deletion\ do\ not\ support\ cancel. = failed to cancel deletion job. Volume[uuid:{0}] attached vm not exists, offline snapshot deletion do not support cancel. +failed\ to\ cancel\ deletion\ job.\ Volume[uuid\:%s]\ attached\ vm\ not\ in\ state\ %s\ offline\ snapshot\ deletion\ do\ not\ support\ cancel. = failed to cancel deletion job. Volume[uuid:{0}] attached vm not in state {1} offline snapshot deletion do not support cancel. 
volume\ snapshot[uuids\:%s]\ is\ in\ state\ Disabled,\ cannot\ revert\ volume\ to\ it = volume snapshot[uuids:{0}] is in state Disabled, cannot revert volume to it Can\ not\ take\ memory\ snapshot,\ expected\ vm\ states\ are\ [%s,\ %s] = Can not take memory snapshot, expected vm states are [{0}, {1}] volume\ snapshot[uuid\:%s]\ is\ in\ state\ %s,\ cannot\ revert\ volume\ to\ it = volume snapshot[uuid:{0}] is in state {1}, cannot revert volume to it @@ -3316,20 +3951,27 @@ can\ not\ find\ volume\ uuid\ for\ snapshosts[uuid\:\ %s] = can not find volume Unsupported\ maximum\ snapshot\ number\ (%d)\ for\ volume\ [uuid\:%s] = Unsupported maximum snapshot number ({0}) for volume [uuid:{1}] cannot\ find\ type\ for\ primaryStorage\ [%s] = cannot find type for primaryStorage [{0}] cannot\ ask\ primary\ storage[uuid\:%s]\ for\ volume\ snapshot\ capability = cannot ask primary storage[uuid:{0}] for volume snapshot capability +primary\ storage[uuid\:%s]\ doesn't\ support\ volume\ snapshot;\ cannot\ create\ snapshot\ for\ volume[uuid\:%s] = primary storage[uuid:{0}] doesn''t support volume snapshot; cannot create snapshot for volume[uuid:{1}] cannot\ find\ snapshot\:\ %s = cannot find snapshot: {0} this\ resource\ type\ %s\ does\ not\ support\ querying\ memory\ snapshot\ references = this resource type {0} does not support querying memory snapshot references cannot\ find\ VmInstanceResourceMetadataGroupVO\ of\ the\ memory\ snapshot\ group[uuid\:%s] = cannot find VmInstanceResourceMetadataGroupVO of the memory snapshot group[uuid:{0}] snapshot[uuid\:%s,\ name\:%s]'s\ status[%s]\ is\ not\ allowed\ for\ message[%s],\ allowed\ status%s = snapshot[uuid:{0}, name:{1}]''s status[{2}] is not allowed for message[{3}], allowed status{4} cannot\ find\ volume\ snapshot[uuid\:%s,\ name\:%s],\ it\ may\ have\ been\ deleted\ by\ previous\ operation = cannot find volume snapshot[uuid:{0}, name:{1}], it may have been deleted by previous operation snapshot\ or\ its\ desendant\ has\ reference\ 
volume[uuids\:%s] = snapshot or its descendant has reference volume[uuids:{0}] +vm[uuid\:%s]\ is\ not\ Running,\ Paused\ or\ Destroyed,\ Stopped,\ Destroying,\ current\ state[%s] = vm[uuid:{0}] is not Running, Paused or Destroyed, Stopped, Destroying, current state[{1}] failed\ to\ change\ status\ of\ volume\ snapshot[uuid\:%s,\ name\:%s]\ by\ status\ event[%s] = failed to change status of volume snapshot[uuid:{0}, name:{1}] by status event[{2}] +unable\ to\ reset\ volume[uuid\:%s]\ to\ snapshot[uuid\:%s],\ the\ vm[uuid\:%s]\ volume\ attached\ to\ is\ not\ in\ Stopped\ state,\ current\ state\ is\ %s = unable to reset volume[uuid:{0}] to snapshot[uuid:{1}], the vm[uuid:{2}] volume attached to is not in Stopped state, current state is {3} snapshot(s)\ %s\ in\ the\ group\ has\ been\ deleted,\ can\ only\ revert\ one\ by\ one. = snapshot(s) {0} in the group has been deleted, can only revert one by one. +volume(s)\ %s\ is\ no\ longer\ attached,\ can\ only\ revert\ one\ by\ one.\ If\ you\ need\ to\ group\ revert,\ please\ re-attach\ it. = volume(s) {0} is no longer attached, can only revert one by one. If you need to group revert, please re-attach it. +new\ volume(s)\ %s\ attached\ after\ snapshot\ point,\ can\ only\ revert\ one\ by\ one.\ If\ you\ need\ to\ group\ revert,\ please\ detach\ it. = new volume(s) {0} attached after snapshot point, can only revert one by one. If you need to group revert, please detach it.
+\ volume[uuid\:\ %s]\ has\ been\ referenced\ by\ other\ volumes\ [%s],\ can\ not\ change\ install\ path\ before\ flatten\ them\ and\ their\ descendants\ = volume[uuid: {0}] has been referenced by other volumes [{1}], can not change install path before flatten them and their descendants current\ volume\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s] = current volume state[{0}] doesn''t allow to proceed message[{1}] failed\ to\ select\ backup\ storage\ to\ download\ iso[uuid\=%s] = failed to select backup storage to download iso[uuid={0}] unable\ to\ download\ iso\ to\ primary\ storage = unable to download iso to primary storage -volume[uuid\:%s]\ is\ not\ in\ status\ Ready,\ current\ is\ %s,\ can't\ create\ snapshot = volume[uuid:{0}] is not in status Ready, current is {1}, can''t create snapshot volume[uuid\:%s,\ type\:%s],\ can't\ create\ snapshot = volume[uuid:{0}, type:{1}], can''t create snapshot +volume[uuid\:%s]\ is\ not\ in\ state\ Enabled,\ current\ is\ %s,\ can't\ create\ snapshot = volume[uuid:{0}] is not in state Enabled, current is {1}, can''t create snapshot Can\ not\ take\ memory\ snapshot,\ vm\ current\ state[%s],\ but\ expect\ state\ are\ [%s,\ %s] = Can not take memory snapshot, vm current state[{0}], but expect state are [{1}, {2}] +volume[uuid\:%s]\ is\ not\ in\ status\ Ready,\ current\ is\ %s,\ can't\ create\ snapshot = volume[uuid:{0}] is not in status Ready, current is {1}, can''t create snapshot the\ volume[uuid\:%s]\ is\ not\ in\ status\ of\ deleted.\ This\ is\ operation\ is\ to\ recover\ a\ deleted\ data\ volume = the volume[uuid:{0}] is not in status of deleted. 
This operation is to recover a deleted data volume image[uuid\:%s]\ is\ not\ %s,\ it's\ %s = image[uuid:{0}] is not {1}, it''s {2} image[uuid\:%s]\ is\ not\ Enabled,\ it's\ %s = image[uuid:{0}] is not Enabled, it''s {1} @@ -3347,17 +3989,21 @@ the\ volume[uuid\:%s]\ is\ in\ status\ of\ deleted,\ cannot\ do\ the\ operation data\ volume[uuid\:%s]\ has\ been\ attached\ to\ some\ vm,\ can't\ attach\ again = data volume[uuid:{0}] has been attached to some vm, can''t attach again data\ volume\ can\ only\ be\ attached\ when\ status\ is\ [%s,\ %s],\ current\ is\ %s = data volume can only be attached when status is [{0}, {1}], current is {2} data\ volume[uuid\:%s]\ of\ format[%s]\ is\ not\ supported\ for\ attach\ to\ any\ hypervisor. = data volume[uuid:{0}] of format[{1}] is not supported for attach to any hypervisor. +data\ volume[uuid\:%s]\ has\ format[%s]\ that\ can\ only\ be\ attached\ to\ hypervisor[%s],\ but\ vm\ has\ hypervisor\ type[%s].\ Can't\ attach = data volume[uuid:{0}] has format[{1}] that can only be attached to hypervisor[{2}], but vm has hypervisor type[{3}].
Can''t attach Can\ not\ attach\ volume\ to\ vm\ runs\ on\ host[uuid\:\ %s]\ which\ is\ disconnected\ with\ volume's\ storage[uuid\:\ %s] = Can not attach volume to vm runs on host[uuid: {0}] which is disconnected with volume''s storage[uuid: {1}] it's\ not\ allowed\ to\ backup\ root\ volume,\ uuid\:%s = it''s not allowed to backup root volume, uuid:{0} unexpected\ disk\ size\ settings = unexpected disk size settings volume[uuid\:%s,\ type\:%s]\ can't\ be\ deleted = volume[uuid:{0}, type:{1}] can''t be deleted volume[uuid\:%s]\ is\ already\ in\ status\ of\ deleted = volume[uuid:{0}] is already in status of deleted +can\ not\ delete\ volume[%s],\ because\ volume\ attach\ to\ host[%s] = can not delete volume[{0}], because volume attach to host[{1}] it's\ not\ allowed\ to\ change\ state\ of\ root\ volume,\ uuid\:%s = it''s not allowed to change state of root volume, uuid:{0} +can\ not\ change\ volume[%s]\ state,\ because\ volume\ attach\ to\ host[%s] = can not change volume[{0}] state, because volume attach to host[{1}] can\ not\ attach\ volume[%s]\ to\ host[%s],\ because\ host[status\:%s]\ is\ not\ connected = can not attach volume[{0}] to host[{1}], because host[status:{2}] is not connected mount\ path\ must\ be\ absolute\ path = mount path must be absolute path can\ not\ attach\ volume[%s]\ to\ host[%s],\ because\ volume\ is\ attaching\ to\ host[%s] = can not attach volume[{0}] to host[{1}], because volume is attaching to host[{2}] can\ not\ attach\ volume[%s]\ to\ host[%s],\ because\ the\ volume[%s]\ occupies\ the\ mount\ path[%s]\ on\ host[%s] = can not attach volume[{0}] to host[{1}], because the volume[{2}] occupies the mount path[{3}] on host[{4}] can\ not\ attach\ volume[%s]\ to\ host[%s],\ because\ the\ another\ volume\ occupies\ the\ mount\ path[%s] = can not attach volume[{0}] to host[{1}], because the another volume occupies the mount path[{2}] +can\ not\ detach\ volume[%s]\ from\ host.\ it\ may\ have\ been\ detached = can not detach volume[{0}] from host. 
it may have been detached cannot\ flatten\ a\ shareable\ volume[uuid\:%s] = cannot flatten a shareable volume[uuid:{0}] can\ not\ found\ in\ used\ snapshot\ tree\ of\ volume[uuid\:\ %s] = can not found in used snapshot tree of volume[uuid: {0}] cannot\ undo\ not\ latest\ snapshot = cannot undo not latest snapshot @@ -3435,10 +4081,13 @@ cannot\ update\ simple\ tag\ pattern\ format = cannot update simple tag pattern simple\ tag\ pattern\ has\ no\ tokens = simple tag pattern has no tokens illegal\ tag\ uuids\ %s,\ tag\ type\ must\ be\ simple, = illegal tag uuids {0}, tag type must be simple, Invalid\ color\ specification[%s],\ must\ like\ #FF00FF = Invalid color specification[{0}], must like #FF00FF +Get\ format[%s],\ format\ must\ like\ that\ name\:\:{tokenName1}\:\:{tokenName2}\ ...\ \:\:{tokenNameN}\ or\ {tokenName1}\:\:{tokenName2}\ ...\ \:\:{tokenNameN}\ Name\ cannot\ contain\ '{}\:' = Get format[{0}], format must like that name::'{tokenName1}'::'{tokenName2}' ... ::'{tokenNameN}' or '{tokenName1}'::'{tokenName2}' ... ::'{tokenNameN}' Name cannot contain '''{}':'' all\ tokens\ %s\ must\ be\ specify = all tokens {0} must be specify you\ already\ has\ a\ tag\ which\ [name\:%s,\ color\:%s] = you already has a tag which [name:{0}, color:{1}] resource[uuid\:%s]\ has\ been\ attached\ %d\ tags,\ cannot\ attach\ any\ more = resource[uuid:{0}] has been attached {1} tags, cannot attach any more +# In Module: test-premium + # In Module: test I\ should\ not\ be\ in\ error\ list\ %d = I should not be in error list {0} I\ should\ not\ be\ in\ error\ list\ either\ %d = I should not be in error list either {0} @@ -3447,6 +4096,14 @@ done,\ on\ purpose = done, on purpose I\ should\ not\ be\ errs\ list = I should not be errs list I\ should\ not\ be\ errs\ list\ either. = I should not be errs list either. 
+# In Module: testlib-premium +InfoSecEncryptDriver\ encrypt\ failed = InfoSecEncryptDriver encrypt failed +InfoSecEncryptDriver\ decrypt\ failed = InfoSecEncryptDriver decrypt failed +illegal\ argument\ %s = illegal argument {0} +failed\ to\ decrypt\ data = failed to decrypt data +fail\ to\ decrypt\ cipher\ text = fail to decrypt cipher text +failed\ to\ parse\ MS\ envelope\:\ %s,\ %s = failed to parse MS envelope: {0}, {1} + # In Module: testlib # In Module: ticket @@ -3454,6 +4111,7 @@ ticket[uuid\:%s,\ name\:%s]\ can\ only\ be\ updated\ after\ being\ cancelled,\ c operation\ denied.\ the\ operator\ needs\ to\ be\ done\ by\ account/virtual\ ID[uuid\:%s] = operation denied. the operator needs to be done by account/virtual ID[uuid:{0}] no\ accountSystemType[%s]\ defined\ in\ system = no accountSystemType[{0}] defined in system not\ matched\ ticket\ type\ found = not matched ticket type found +no\ matched\ ticket\ flow\ collection\ or\ no\ default\ ticket\ flow\ collection\ found,\ you\ must\ specify\ the\ flowCollectionUuid\ or\ create\ a\ default\ ticket\ flow\ collection\ in\ system = no matched ticket flow collection or no default ticket flow collection found, you must specify the flowCollectionUuid or create a default ticket flow collection in system Ticket\ flow\ collection[uuid\:%s]\ not\ matches\ ticket\ type[uuid\:%s] = Ticket flow collection[uuid:{0}] not matches ticket type[uuid:{1}] Ticket\ flow\ collection[uuid\:%s]\ is\ invalid,\ contact\ admin\ to\ correct\ it = Ticket flow collection[uuid:{0}] is invalid, contact admin to correct it Ticket\ flow\ collection[uuid\:%s]\ is\ disable,\ can\ not\ be\ used = Ticket flow collection[uuid:{0}] is disable, can not be used @@ -3485,6 +4143,8 @@ two\ factor\ authentication\ failed\ because\ there\ is\ no\ token\ in\ msg\ sys two\ factor\ authentication\ failed\ because\ there\ is\ no\ secret\ for\ %s\:%s = two factor authentication failed because there is no secret for {0}:{1} failed\ to\ verify\ two\ factor\ 
authentication\ code = failed to verify two factor authentication code +# In Module: upgrade-hack + # In Module: utils # In Module: vhost @@ -3506,6 +4166,7 @@ service\ provider\ of\ the\ vip[uuid\:%s,\ name\:%s,\ ip\:\ %s]\ has\ been\ set\ cannot\ find\ the\ vip[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find the vip[uuid:{0}], it may have been deleted # In Module: virtualRouterProvider +the\ virtual\ router[name\:%s,\ uuid\:%s,\ current\ state\:%s]\ is\ not\ running,and\ cannot\ perform\ required\ operation.\ Please\ retry\ your\ operation\ later\ once\ it\ is\ running = the virtual router[name:{0}, uuid:{1}, current state:{2}] is not running,and cannot perform required operation. Please retry your operation later once it is running virtual\ router[uuid\:%s]\ is\ in\ status\ of\ %s\ that\ cannot\ make\ http\ call\ to\ %s = virtual router[uuid:{0}] is in status of {1} that cannot make http call to {2} virtual\ router[uuid\:%s]\ has\ no\ management\ nic\ that\ cannot\ make\ http\ call\ to\ %s = virtual router[uuid:{0}] has no management nic that cannot make http call to {1} unable\ to\ add\ nic[ip\:%s,\ ip6\:%s,\ mac\:%s]\ to\ virtual\ router\ vm[uuid\:%s\ ip\:%s],\ because\ %s = unable to add nic[ip:{0}, ip6:{1}, mac:{2}] to virtual router vm[uuid:{3} ip:{4}], because {5} @@ -3534,6 +4195,9 @@ No\ virtual\ router\ instance\ offering\ with\ uuid\:%s\ is\ found = No virtual the\ network\ of\ virtual\ router\ instance\ offering\ with\ uuid\:%s\ can't\ be\ same\ with\ private\ l3\ network\ uuid\:%s = the network of virtual router instance offering with uuid:{0} can''t be same with private l3 network uuid:{1} unable\ to\ find\ a\ virtual\ router\ offering\ for\ l3Network[uuid\:%s]\ in\ zone[uuid\:%s],\ please\ at\ least\ create\ a\ default\ virtual\ router\ offering\ in\ that\ zone = unable to find a virtual router offering for l3Network[uuid:{0}] in zone[uuid:{1}], please at least create a default virtual router offering in that zone Failed\ to\ start\ vr\ 
l3[uuid\:\ %s] = Failed to start vr l3[uuid: {0}] +cannot\ add\ ip\ range,\ because\ l3\ network[uuid\:%s]\ is\ management\ network\ of\ virtual\ router\ offering = cannot add ip range, because l3 network[uuid:{0}] is management network of virtual router offering +cannot\ add\ ip\ range,\ because\ l3\ network[uuid\:%s]\ is\ management\ network\ of\ virtual\ router = cannot add ip range, because l3 network[uuid:{0}] is management network of virtual router +couldn't\ add\ image,\ because\ systemTag\ [%s]\ includes\ invalid\ appliance\ image\ type\ [%s] = couldn''t add image, because systemTag [{0}] includes invalid appliance image type [{1}] failed\ tot\ attach\ virtual\ router\ network\ services\ to\ l3Network[uuid\:%s].\ When\ eip\ is\ selected,\ snat\ must\ be\ selected\ too = failed tot attach virtual router network services to l3Network[uuid:{0}]. When eip is selected, snat must be selected too failed\ tot\ attach\ virtual\ router\ network\ services\ to\ l3Network[uuid\:%s].\ When\ port\ forwarding\ is\ selected,\ snat\ must\ be\ selected\ too = failed tot attach virtual router network services to l3Network[uuid:{0}]. 
When port forwarding is selected, snat must be selected too update\ virtual\ router\ [uuid\:%s]\ default\ network\ failed,\ because\ %s = update virtual router [uuid:{0}] default network failed, because {1} @@ -3544,21 +4208,29 @@ unable\ to\ program\ dhcp\ entries\ served\ by\ virtual\ router[uuid\:%s,\ ip\:% virtual\ router[uuid\:%s,\ ip\:%s]\ failed\ to\ configure\ dns%s\ for\ L3Network[uuid\:%s,\ name\:%s],\ %s = virtual router[uuid:{0}, ip:{1}] failed to configure dns{2} for L3Network[uuid:{3}, name:{4}], {5} virtual\ router[name\:\ %s,\ uuid\:\ %s]\ failed\ to\ configure\ dns%s,\ %s\ = virtual router[name: {0}, uuid: {1}] failed to configure dns{2}, {3} failed\ to\ create\ eip[uuid\:%s,\ name\:%s,\ ip\:%s]\ for\ vm\ nic[uuid\:%s]\ on\ virtual\ router[uuid\:%s],\ %s = failed to create eip[uuid:{0}, name:{1}, ip:{2}] for vm nic[uuid:{3}] on virtual router[uuid:{4}], {5} +found\ a\ virtual\ router\ offering[uuid\:%s]\ for\ L3Network[uuid\:%s]\ in\ zone[uuid\:%s];\ however,\ the\ network's\ public\ network[uuid\:%s]\ is\ not\ the\ same\ to\ EIP[uuid\:%s]'s;\ you\ may\ need\ to\ use\ system\ tag\ guestL3Network\:\:l3NetworkUuid\ to\ specify\ a\ particular\ virtual\ router\ offering\ for\ the\ L3Network = found a virtual router offering[uuid:{0}] for L3Network[uuid:{1}] in zone[uuid:{2}]; however, the network''s public network[uuid:{3}] is not the same to EIP[uuid:{4}]''s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the L3Network failed\ to\ remove\ eip[uuid\:%s,\ name\:%s,\ ip\:%s]\ for\ vm\ nic[uuid\:%s]\ on\ virtual\ router[uuid\:%s],\ %s = failed to remove eip[uuid:{0}, name:{1}, ip:{2}] for vm nic[uuid:{3}] on virtual router[uuid:{4}], {5} failed\ to\ sync\ eip\ on\ virtual\ router[uuid\:%s],\ %s = failed to sync eip on virtual router[uuid:{0}], {1} ha\ group\ extension\ point\ nil = ha group extension point nil +new\ add\ vm\ nics[uuids\:%s]\ and\ attached\ vmnics\ are\ not\ on\ the\ same\ 
vrouter,\ they\ are\ on\ vrouters[uuids\:%s] = new add vm nics[uuids:{0}] and attached vmnics are not on the same vrouter, they are on vrouters[uuids:{1}] +new\ add\ vm\ nics[uuids\:%s]\ and\ peer\ l3s[uuids\:%s]\ of\ loadbalancer[uuid\:\ %s]'s\ vip\ are\ not\ on\ the\ same\ vrouter,\ they\ are\ on\ vrouters[uuids\:%s] = new add vm nics[uuids:{0}] and peer l3s[uuids:{1}] of loadbalancer[uuid: {2}]''s vip are not on the same vrouter, they are on vrouters[uuids:{3}] vmnic\ must\ be\ specified\ for\ share\ loadbalancer = vmnic must be specified for share loadbalancer cannot\ find\ virtual\ router\ for\ load\ balancer\ [uuid\:%s] = cannot find virtual router for load balancer [uuid:{0}] guest\ l3Network[uuid\:%s,\ name\:%s]\ needs\ SNAT\ service\ provided\ by\ virtual\ router,\ but\ public\ l3Network[uuid\:%s]\ of\ virtual\ router\ offering[uuid\:\ %s,\ name\:%s]\ is\ the\ same\ to\ this\ guest\ l3Network = guest l3Network[uuid:{0}, name:{1}] needs SNAT service provided by virtual router, but public l3Network[uuid:{2}] of virtual router offering[uuid: {3}, name:{4}] is the same to this guest l3Network virtual\ router[name\:\ %s,\ uuid\:\ %s]\ failed\ to\ sync\ snat%s,\ %s = virtual router[name: {0}, uuid: {1}] failed to sync snat{2}, {3} failed\ to\ create\ port\ forwarding\ rule[vip\ ip\:\ %s,\ private\ ip\:\ %s,\ vip\ start\ port\:\ %s,\ vip\ end\ port\:\ %s,\ private\ start\ port\:\ %s,\ private\ end\ port\:\ %s],\ because\ %s = failed to create port forwarding rule[vip ip: {0}, private ip: {1}, vip start port: {2}, vip end port: {3}, private start port: {4}, private end port: {5}], because {6} failed\ to\ revoke\ port\ forwarding\ rules\ %s,\ because\ %s = failed to revoke port forwarding rules {0}, because {1} +found\ a\ virtual\ router\ offering[uuid\:%s]\ for\ L3Network[uuid\:%s]\ in\ zone[uuid\:%s];\ however,\ the\ network's\ public\ network[uuid\:%s]\ is\ not\ the\ same\ to\ PortForwarding\ rule[uuid\:%s]'s;\ you\ may\ need\ to\ use\ system\ tag\ 
guestL3Network\:\:l3NetworkUuid\ to\ specify\ a\ particular\ virtual\ router\ offering\ for\ the\ L3Network = found a virtual router offering[uuid:{0}] for L3Network[uuid:{1}] in zone[uuid:{2}]; however, the network''s public network[uuid:{3}] is not the same to PortForwarding rule[uuid:{4}]''s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the L3Network +virtual\ router\ doesn't\ support\ port\ forwarding\ range\ redirection,\ the\ vipPortStart\ must\ be\ equals\ to\ privatePortStart\ and\ vipPortEnd\ must\ be\ equals\ to\ privatePortEnd;but\ this\ rule\ rule\ has\ a\ mismatching\ range\:\ vip\ port[%s,\ %s],\ private\ port[%s,\ %s] = virtual router doesn''t support port forwarding range redirection, the vipPortStart must be equals to privatePortStart and vipPortEnd must be equals to privatePortEnd;but this rule rule has a mismatching range: vip port[{0}, {1}], private port[{2}, {3}] failed\ to\ add\ portforwardings\ on\ virtual\ router[uuid\:%s],\ %s = failed to add portforwardings on virtual router[uuid:{0}], {1} failed\ to\ revoke\ port\ forwardings\ on\ virtual\ router[uuid\:%s],\ %s = failed to revoke port forwardings on virtual router[uuid:{0}], {1} failed\ to\ sync\ port\ forwarding\ rules\ served\ by\ virtual\ router[name\:\ %s,\ uuid\:\ %s],\ because\ %s = failed to sync port forwarding rules served by virtual router[name: {0}, uuid: {1}], because {2} +failed\ to\ sync\ vips[ips\:\ %s]\ on\ virtual\ router[uuid\:%s]\ for\ attaching\ nic[uuid\:\ %s,\ ip\:\ %s],\ because\ %s = failed to sync vips[ips: {0}] on virtual router[uuid:{1}] for attaching nic[uuid: {2}, ip: {3}], because {4} failed\ to\ remove\ vip%s,\ because\ %s = failed to remove vip{0}, because {1} virtual\ router[uuid\:%s,\ state\:%s]\ is\ not\ running = virtual router[uuid:{0}, state:{1}] is not running +found\ a\ virtual\ router\ offering[uuid\:%s]\ for\ L3Network[uuid\:%s]\ in\ zone[uuid\:%s];\ however,\ the\ network's\ 
public\ network[uuid\:%s]\ is\ not\ the\ same\ to\ VIP[uuid\:%s]'s;\ you\ may\ need\ to\ use\ system\ tag\ guestL3Network\:\:l3NetworkUuid\ to\ specify\ a\ particular\ virtual\ router\ offering\ for\ the\ L3Network = found a virtual router offering[uuid:{0}] for L3Network[uuid:{1}] in zone[uuid:{2}]; however, the network''s public network[uuid:{3}] is not the same to VIP[uuid:{4}]''s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the L3Network failed\ to\ change\ nic[ip\:%s,\ mac\:%s]\ firewall\ default\ action\ of\ virtual\ router\ vm[uuid\:%s],\ because\ %s = failed to change nic[ip:{0}, mac:{1}] firewall default action of virtual router vm[uuid:{2}], because {3} +the\ SSH\ port\ is\ not\ open\ after\ %s\ seconds.\ Failed\ to\ login\ the\ virtual\ router[ip\:%s] = the SSH port is not open after {0} seconds. Failed to login the virtual router[ip:{1}] unable\ to\ ssh\ in\ to\ the\ virtual\ router[%s]\ after\ configure\ ssh = unable to ssh in to the virtual router[{0}] after configure ssh vyos\ init\ command\ failed,\ because\:%s = vyos init command failed, because:{0} virtual\ router\ deploy\ agent\ failed,\ because\ %s = virtual router deploy agent failed, because {0} @@ -3570,29 +4242,197 @@ failed\ to\ update\ bridge[%s]\ for\ l2Network[uuid\:%s,\ name\:%s]\ on\ kvm\ ho bonding[%s]\ is\ not\ found\ on\ host[uuid\:%s]\ for\ virtual\ switch[uuid\:%s] = bonding[{0}] is not found on host[uuid:{1}] for virtual switch[uuid:{2}] failed\ to\ update\ vlan\ bridge\ for\ virtual\ switch[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = failed to update vlan bridge for virtual switch[uuid:{0}, name:{1}] on kvm host[uuid:{2}], {3} The\ uplink\ bonding[%s]\ is\ not\ found\ on\ host[uuid\:%s]\ for\ virtual\ switch[uuid\:%s] = The uplink bonding[{0}] is not found on host[uuid:{1}] for virtual switch[uuid:{2}] +an\ unexpected\ error\ caused\ the\ bonding\ to\ not\ be\ created\ on\ host[uuid\:%s]\ for\ virtual\ 
switch[uuid\:%s] = an unexpected error caused the bonding to not be created on host[uuid:{0}] for virtual switch[uuid:{1}] interface[uuid\:%s]\ is\ not\ found\ on\ host[uuid\:%s]\ for\ virtual\ switch[uuid\:%s] = interface[uuid:{0}] is not found on host[uuid:{1}] for virtual switch[uuid:{2}] +the\ default\ virtual\ switch\ network[uuid\:%s]\ cannot\ be\ deleted\ when\ it\ is\ still\ attached\ to\ hosts = the default virtual switch network[uuid:{0}] cannot be deleted when it is still attached to hosts +could\ not\ delete\ virtual\ switch\ network[uuid\:%s],because\ host\ kernel\ interface[uuid\:%s]\ still\ exists\ on\ the\ virtual\ switch\ and\ its\ host\ status\ is\ not\ connected = could not delete virtual switch network[uuid:{0}],because host kernel interface[uuid:{1}] still exists on the virtual switch and its host status is not connected could\ not\ delete\ l2\ network[uuid\:%s]\ with\ default\ port\ group = could not delete l2 network[uuid:{0}] with default port group +could\ not\ delete\ l2\ port\ group\ network[uuid\:%s],because\ host\ kernel\ interface[%s]\ still\ exists\ on\ the\ port\ group\ and\ its\ host\ status\ is\ not\ connected = could not delete l2 port group network[uuid:{0}],because host kernel interface[{1}] still exists on the port group and its host status is not connected +cannot\ delete\ default\ port\ group[uuid\:%s],\ because\ there\ are\ host\ kernel\ interfaces\ still\ exist\ on\ hosts[uuid\:%s] = cannot delete default port group[uuid:{0}], because there are host kernel interfaces still exist on hosts[uuid:{1}] +could\ not\ delete\ port\ group[uuid\:%s],\ because\ host\ kernel\ interface[uuid\:%s]\ still\ exists\ on\ the\ port\ group\ and\ its\ host\ status\ is\ not\ connected = could not delete port group[uuid:{0}], because host kernel interface[uuid:{1}] still exists on the port group and its host status is not connected +could\ not\ create\ host\ kernel\ interface,\ because\ requiredIp\ cannot\ be\ null\ with\ l3Network[uuid\:%s]\ 
disable\ IPAM = could not create host kernel interface, because requiredIp cannot be null with l3Network[uuid:{0}] disable IPAM +could\ not\ batch\ create\ host\ kernel\ interface,\ because\ hostUuid\ in\ struct\ should\ be\ set = could not batch create host kernel interface, because hostUuid in struct should be set +could\ not\ create\ host\ kernel\ interface,\ because\ host[uuid\:%s]\ not\ found = could not create host kernel interface, because host[uuid:{0}] not found +could\ not\ create\ host\ kernel\ interface\ for\ host[uuid\:%s],\ because\ name\ should\ be\ set = could not create host kernel interface for host[uuid:{0}], because name should be set +could\ not\ batch\ create\ host\ kernel\ interface,\ because\ ip\ cannot\ be\ null\ with\ l3Network[uuid\:%s]\ disable\ IPAM = could not batch create host kernel interface, because ip cannot be null with l3Network[uuid:{0}] disable IPAM +could\ not\ batch\ create\ host\ kernel\ interface,\ because\ duplicate\ ipv4\ address[%s]\ in\ input\ structs = could not batch create host kernel interface, because duplicate ipv4 address[{0}] in input structs +could\ not\ batch\ create\ host\ kernel\ interface,\ because\ duplicate\ ipv6\ address[%s]\ in\ input\ structs = could not batch create host kernel interface, because duplicate ipv6 address[{0}] in input structs +could\ not\ update\ host\ kernel\ interface[uuid\:%s],\ because\ netmask\ cannot\ be\ set\ without\ requiredIp = could not update host kernel interface[uuid:{0}], because netmask cannot be set without requiredIp +could\ not\ update\ host\ kernel\ interface[uuid\:%s],\ because\ host[uuid\:%s]\ is\ not\ connected = could not update host kernel interface[uuid:{0}], because host[uuid:{1}] is not connected could\ not\ delete\ default\ host\ kernel\ interface[uuid\:%s] = could not delete default host kernel interface[uuid:{0}] +could\ not\ delete\ host\ kernel\ interface[uuid\:%s],\ because\ host[uuid\:%s]\ is\ not\ connected = could not delete host kernel 
interface[uuid:{0}], because host[uuid:{1}] is not connected the\ index\ of\ virtual\ switch\ in\ zone[%s]\ exceeds\ the\ maximum[%s] = the index of virtual switch in zone[{0}] exceeds the maximum[{1}] need\ to\ input\ one\ system\ tag\ like\:\ [%s] = need to input one system tag like: [{0}] +physicalInterface\ should\ not\ be\ null\ when\ uplink\ bonding\ is\ set = physicalInterface should not be null when uplink bonding is set only\ one\ systemTag\ for\ uplink\ bonding\ is\ allowed = only one systemTag for uplink bonding is allowed wrong\ xmit\ hash\ policy\ in\ system\ tag[%s] = wrong xmit hash policy in system tag[{0}] wrong\ bonding\ mode\ in\ system\ tag[%s] = wrong bonding mode in system tag[{0}] wrong\ system\ tag[%s],\ should\ be\ like\:\ [%s] = wrong system tag[{0}], should be like: [{1}] +could\ not\ create\ L2PortGroupNetwork,\ because\ L2VirtualSwitchNetwork[uuid\:%s]\ already\ has\ L2PortGroupNetworks\ with\ the\ same\ vlanId[%s] = could not create L2PortGroupNetwork, because L2VirtualSwitchNetwork[uuid:{0}] already has L2PortGroupNetworks with the same vlanId[{1}] +could\ not\ attach\ L2PortGroupNetwork[uuid\:%s]\ to\ cluster[uuid\:%s],\ which\ L2VirtualSwitchNetwork\ should\ be\ used = could not attach L2PortGroupNetwork[uuid:{0}] to cluster[uuid:{1}], which L2VirtualSwitchNetwork should be used could\ not\ attach\ L2Network\ to\ KVM\ cluster,\ because\ the\ l2Network[uuid\:%s]\ is\ default\ vSwitch = could not attach L2Network to KVM cluster, because the l2Network[uuid:{0}] is default vSwitch +could\ not\ attach\ L2VirtualSwitchNetwork,\ because\ interface[%s]\ in\ cluster[uuid\:%s]\ is\ already\ used\ for\ another\ L2VirtualSwitchNetwork = could not attach L2VirtualSwitchNetwork, because interface[{0}] in cluster[uuid:{1}] is already used for another L2VirtualSwitchNetwork +could\ not\ attach\ L2PortGroupNetwork[uuid\:%s]\ to\ host[uuid\:%s],\ which\ L2VirtualSwitchNetwork\ should\ be\ used = could not attach L2PortGroupNetwork[uuid:{0}] to 
host[uuid:{1}], which L2VirtualSwitchNetwork should be used +could\ not\ attach\ L2VirtualSwitchNetwork[uuid\:%s]\ to\ host[uuid\:%s],\ because\ the\ physical\ interface[%s]\ is\ invalid = could not attach L2VirtualSwitchNetwork[uuid:{0}] to host[uuid:{1}], because the physical interface[{2}] is invalid +could\ not\ attach\ L2VirtualSwitchNetwork[uuid\:%s]\ to\ host[uuid\:%s],\ because\ the\ pass-through\ state\ of\ physical\ interface[%s]\ is\ [Enabled] = could not attach L2VirtualSwitchNetwork[uuid:{0}] to host[uuid:{1}], because the pass-through state of physical interface[{2}] is [Enabled] +could\ not\ attach\ L2VirtualSwitchNetwork[uuid\:%s]\ to\ host[uuid\:%s],\ because\ there\ is\ no\ uplink\ configured\ for\ the\ virtual\ switch\ on\ the\ host = could not attach L2VirtualSwitchNetwork[uuid:{0}] to host[uuid:{1}], because there is no uplink configured for the virtual switch on the host +could\ not\ detach\ L2PortGroupNetwork[uuid\:%s]\ from\ cluster[uuid\:%s],\ which\ L2VirtualSwitchNetwork\ should\ be\ used = could not detach L2PortGroupNetwork[uuid:{0}] from cluster[uuid:{1}], which L2VirtualSwitchNetwork should be used could\ not\ detach\ L2Network\ from\ KVM\ cluster,\ because\ the\ l2Network[uuid\:%s]\ is\ default\ vSwitch = could not detach L2Network from KVM cluster, because the l2Network[uuid:{0}] is default vSwitch +could\ not\ detach\ L2PortGroupNetwork[uuid\:%s]\ from\ host[uuid\:%s],\ which\ L2VirtualSwitchNetwork\ should\ be\ used = could not detach L2PortGroupNetwork[uuid:{0}] from host[uuid:{1}], which L2VirtualSwitchNetwork should be used could\ not\ detach\ L2Network\ from\ host,\ because\ the\ l2Network[uuid\:%s]\ is\ default\ vSwitch = could not detach L2Network from host, because the l2Network[uuid:{0}] is default vSwitch +could\ not\ create\ port\ group\ for\ L2Network[uuid\:%s]that\ does\ not\ belong\ to\ vSwitch[uuid\:%s] = could not create port group for L2Network[uuid:{0}]that does not belong to vSwitch[uuid:{1}] could\ not\ create\ 
l3\ network\ on\ virtual\ switch[uuid\:%s] = could not create l3 network on virtual switch[uuid:{0}] vlan[%s]\ for\ port\ group\ is\ invalid = vlan[{0}] for port group is invalid could\ not\ update\ vlan\ for\ port\ group\ with\ default\ port\ group = could not update vlan for port group with default port group +could\ not\ update\ vlan\ for\ port\ group,\ because\ L2VirtualSwitchNetwork[uuid\:%s]\ already\ has\ L2PortGroupNetworks\ with\ the\ same\ vlanId[%s] = could not update vlan for port group, because L2VirtualSwitchNetwork[uuid:{0}] already has L2PortGroupNetworks with the same vlanId[{1}] +could\ not\ update\ uplink\ bonding\ of\ default\ vSwitch\ when\ it\ is\ still\ attached\ to\ hosts\ with\ uplink\ bonding\ exist = could not update uplink bonding of default vSwitch when it is still attached to hosts with uplink bonding exist +bondingName\ cannot\ be\ empty\ \ when\ virtual\ switch\ has\ no\ uplink\ bonding\ config = bondingName cannot be empty when virtual switch has no uplink bonding config +could\ not\ update\ uplink\ bonding\ name\ because\ the\ version\ of\ the\ virtual\ switch[uuid\:%s]\ is\ old = could not update uplink bonding name because the version of the virtual switch[uuid:{0}] is old +could\ not\ update\ uplink\ bonding\ name\ when\ virtual\ switch\ has\ uplink\ bonding\ group = could not update uplink bonding name when virtual switch has uplink bonding group +could\ not\ update\ uplink\ bonding\ name\ which\ has\ been\ occupied\ by\ another\ virtual\ switch\ attached\ to\ the\ same\ cluster = could not update uplink bonding name which has been occupied by another virtual switch attached to the same cluster virtual\ switch[uuid\:%s]\ has\ not\ attached\ to\ host[uuid\:%s] = virtual switch[uuid:{0}] has not attached to host[uuid:{1}] need\ input\ at\ least\ one\ slave = need input at least one slave virtual\ switch[uuid\:%s]\ has\ not\ created\ uplink\ bonding\ config\ yet = virtual switch[uuid:{0}] has not created uplink bonding config yet 
+cannot\ update\ uplink\ to\ bonding,\ because\ bonding[%s]\ already\ exists\ on\ host[uuid\:%s] = cannot update uplink to bonding, because bonding[{0}] already exists on host[uuid:{1}] +could\ not\ update\ mode\ or\ xmit_hash_policy\ of\ bonding[uuid\:%s]\ which\ is\ in\ use\ by\ virtual\ switch[uuid\:%s] = could not update mode or xmit_hash_policy of bonding[uuid:{0}] which is in use by virtual switch[uuid:{1}] could\ not\ delete\ bonding[uuid\:%s],\ because\ it\ is\ in\ use\ by\ virtual\ switch[uuid\:%s] = could not delete bonding[uuid:{0}], because it is in use by virtual switch[uuid:{1}] failed\ to\ create\ hostKernelInterface[name\:%s]\ on\ the\ host[uuid\:%s],\ %s = failed to create hostKernelInterface[name:{0}] on the host[uuid:{1}], {2} failed\ to\ delete\ hostKernelInterface[uuid\:%s]\ on\ the\ host[uuid\:%s],\ %s = failed to delete hostKernelInterface[uuid:{0}] on the host[uuid:{1}], {2} failed\ to\ refresh\ host\ kernel\ interface\ on\ host[uuid\:%s],\ %s = failed to refresh host kernel interface on host[uuid:{0}], {1} +failed\ to\ create\ default\ port\ group,\ because\ the\ bridge\ name[%s]\ of\ managementIp[%s]\ must\ be\ the\ same\ as\ the\ bridge\ name[%s]\ of\ vlanId[%s]\ on\ default\ virtual\ switch[%s] = failed to create default port group, because the bridge name[{0}] of managementIp[{1}] must be the same as the bridge name[{2}] of vlanId[{3}] on default virtual switch[{4}] failed\ to\ get\ the\ host\ interface\ for\ the\ managementIp[%s] = failed to get the host interface for the managementIp[{0}] +failed\ to\ create\ default\ kernel\ interface,because\ the\ uplink\ bonding[name\:%s]\ of\ managementIp[%s]\ must\ be\ the\ same\ as\ cluster[uuid\:%s]\ default\ uplink\ bonding[name\:%s] = failed to create default kernel interface,because the uplink bonding[name:{0}] of managementIp[{1}] must be the same as cluster[uuid:{2}] default uplink bonding[name:{3}] +failed\ to\ create\ default\ port\ group,\ because\ the\ vlanId[%s]\ of\ managementIp[%s]\ 
must\ be\ the\ same\ as\ cluster[uuid\:%s]\ default\ vlanId[%s] = failed to create default port group, because the vlanId[{0}] of managementIp[{1}] must be the same as cluster[uuid:{2}] default vlanId[{3}] + +# In Module: woodpecker + +# In Module: vpcFirewall +can\ not\ detach\ system\ default\ ruleSet = can not detach system default ruleSet +only\ system\ ruleSet\ can\ change\ action\ type = only system ruleSet can change action type +can\ not\ delete\ system\ default\ ruleSet = can not delete system default ruleSet +can\ not\ delete\ system\ default\ rule = can not delete system default rule +the\ router\ [uuid\:%s]\ does\ not\ has\ a\ master\ router = the router [uuid:{0}] does not have a master router +the\ VPC\ Router[uuid\:%s]\ already\ has\ a\ firewall. = the VPC Router[uuid:{0}] already has a firewall. +already\ has\ a\ rule\ template\ with\ name\ %s = already has a rule template with name {0} +the\ ruleSet[%s]\ already\ has\ a\ rule\ with\ rule\ number\ %s. = the ruleSet[{0}] already has a rule with rule number {1}. +can\ not\ update\ default\ rule[%s] = can not update default rule[{0}] +only\ tcp\ protocol\ can\ use\ tcp\ flag = only tcp protocol can use tcp flag +only\ icmp\ protocol\ can\ use\ icmp\ type = only icmp protocol can use icmp type +the\ rule\ [%s]\ number\ is\ invalid = the rule [{0}] number is invalid +can\ not\ attach\ the\ default\ ruleSet\ to\ other\ nic = can not attach the default ruleSet to other nic +ruleSet[%s]\ already\ has\ a\ l3[%s] = ruleSet[{0}] already has a l3[{1}] +already\ has\ a\ rule\ with\ the\ number[%s] = already has a rule with the number[{0}] +the\ ruleSet[%s]\ already\ has\ a\ rule\ with\ the\ rule\ number\ %s. = the ruleSet[{0}] already has a rule with the rule number {1}. 
+could\ not\ add\ firewall\ rule[%d]\ only\ tcp\ or\ udp\ protocol\ can\ use\ port = could not add firewall rule[{0}] only tcp or udp protocol can use port +could\ not\ add\ firewall\ rule[%d]\ only\ tcp\ protocol\ can\ use\ tcp\ flag = could not add firewall rule[{0}] only tcp protocol can use tcp flag +could\ not\ add\ firewall\ rule[%d]\ because\ only\ icmp\ protocol\ can\ use\ icmp\ type = could not add firewall rule[{0}] because only icmp protocol can use icmp type +could\ not\ add\ firewall\ rule[%d]\ because\ only\ tcp\ or\ udp\ protocol\ can\ use\ port = could not add firewall rule[{0}] because only tcp or udp protocol can use port +could\ not\ add\ firewall\ rule[%d]\ because\ only\ tcp\ protocol\ can\ use\ tcp\ flag = could not add firewall rule[{0}] because only tcp protocol can use tcp flag +could\ not\ add\ firewall\ rule[%d]\ because\ %s = could not add firewall rule[{0}] because {1} +could\ not\ add\ firewall\ rule,\ because\ ruleNo\ %d\ is\ invalid = could not add firewall rule, because ruleNo {0} is invalid +could\ not\ add\ firewall\ rule,\ because\ there\ is\ no\ action\ for\ ruleNo\:%d = could not add firewall rule, because there is no action for ruleNo:{0} +could\ not\ add\ firewall\ rule,\ because\ source\ IP\ length\:\ %s\ is\ not\ valid\ for\ ruleNo\:%d = could not add firewall rule, because source IP length: {0} is not valid for ruleNo:{1} +could\ not\ add\ firewall\ rule,\ because\ destination\ IP\ length\:\ %s\ is\ not\ valid\ for\ ruleNo\:%d = could not add firewall rule, because destination IP length: {0} is not valid for ruleNo:{1} +could\ not\ add\ firewall\ rule,\ because\ there\ is\ no\ state\ for\ ruleNo\:%d = could not add firewall rule, because there is no state for ruleNo:{0} +could\ not\ add\ firewall\ rule,\ because\ description\ length\ %s\ is\ not\ valid\ for\ ruleNo\:%d = could not add firewall rule, because description length {0} is not valid for ruleNo:{1} +the\ configuration\ file\ has\ format\ error = the configuration 
file has format error +the\ firewall\ rules\ in\ the\ configuration\ file\ have\ syntax\ errors\:\ %s = the firewall rules in the configuration file have syntax errors: {0} +sync\ firewall\ config\ failed,because\ %s = sync firewall config failed,because {0} +update\ firewall\ ruleSet\ action\ failed,\ because\ %s = update firewall ruleSet action failed, because {0} +Can\ not\ find\ l3[%]\ related\ mac\ on\ vRouter[%s] = Can not find l3[%] related mac on vRouter[{0}] +create\ firewall\ rule[%s]\ failed,\ because\ %s = create firewall rule[{0}] failed, because {1} +delete\ firewall\ on\ vRouter[%s],because\ %s = delete firewall on vRouter[{0}],because {1} +create\ firewall\ ruleSet[%s]\ failed,\ because\ %s = create firewall ruleSet[{0}] failed, because {1} +delete\ firewall\ rule\ failed\ on\ vRouter[%s],\ because\ %s = delete firewall rule failed on vRouter[{0}], because {1} +change\ firewall\ rule\ state\ on\ vRouter[%s]\ failed,\ because\ %s = change firewall rule state on vRouter[{0}] failed, because {1} +attach\ firewall\ ruleSet[%s]\ failed,\ because\ %s = attach firewall ruleSet[{0}] failed, because {1} +detach\ ruleSet\ failed,\ maybe\ it\ has\ been\ deleted = detach ruleSet failed, maybe it has been deleted +detach\ firewall\ ruleSet[%s]\ failed,because\ %s = detach firewall ruleSet[{0}] failed,because {1} +cannot\ find\ vpcFirewall[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find vpcFirewall[uuid:{0}], it may have been deleted +cannot\ find\ vpcFirewallRuleSet[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find vpcFirewallRuleSet[uuid:{0}], it may have been deleted +cannot\ find\ vpcFirewallIpSetTemplate[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find vpcFirewallIpSetTemplate[uuid:{0}], it may have been deleted +attach\ firewall\ ruleSet[%s]\ to\ l3[%s]\ failed,because\ %s = attach firewall ruleSet[{0}] to l3[{1}] failed,because {2} +detach\ firewall\ ruleSet\ from\ l3[%s]\ failed,because\ %s = detach firewall ruleSet from l3[{0}] 
failed,because {1} +find\ duplicate\ rule\ numbers\ %s\ on\ firewall[%s],l3[%s],forward[%s] = find duplicate rule numbers {0} on firewall[{1}],l3[{2}],forward[{3}] +no\ changes\ in\ ruleset\ %s = no changes in ruleset {0} +firewall\ %s\ related\ vpc\ not\ in\ running\ state = firewall {0} related vpc not in running state +can\ not\ delete\ ruleSet[%s]\ because\ it\ still\ attached\ to\ nic = can not delete ruleSet[{0}] because it still attached to nic +default\ ruleset\ %s\ can\ only\ attached\ to\ one\ interface\ forward,\ but\ find\ %s\ related\ interface = default ruleset {0} can only attached to one interface forward, but find {1} related interface +cannot\ find\ vpcFirewall[uuid\:%s]\ related\ vRouter = cannot find vpcFirewall[uuid:{0}] related vRouter + +# In Module: xdragon +xdragon\ host\ not\ support\ create\ vm\ using\ an\ iso\ image. = xdragon host not support create vm using an iso image. + +# In Module: yunshan +the\ url\ is\ null,\ please\ config\ the\ YunShan\ NSP. = the url is null, please config the YunShan NSP. + +# In Module: zboxbackup +please\ insert\ zbox\ to\ management\ node. = please insert zbox to management node. +some\ volume[uuids\:%s]\ recover\ failed.\ you\ can\ trigger\ it\ again\ by\ reconnect\ it. = some volume[uuids:{0}] recover failed. you can trigger it again by reconnect it. +there\ is\ another\ external\ backup[uuid\:\ %s]\ recovering = there is another external backup[uuid: {0}] recovering +both\ hostUuids\ and\ backupStorageUuids\ are\ empty.\ you\ must\ specify\ one\ or\ both\ of\ them. = both hostUuids and backupStorageUuids are empty. you must specify one or both of them. +cannot\ find\ recover.conf\ under\ zbox\ backup\ install\ dir. = cannot find recover.conf under zbox backup install dir. +zbox\ should\ be\ inserted\ to\ a\ host\ first. = zbox should be inserted to a host first. 
+fail\ to\ backup\ database = fail to backup database + +# In Module: zops-plugin +failed\ to\ config\ time\ sources\ '%s'\ in\ %s,\ because\:%s,\ raw\:\ %s = failed to config time sources ''{0}'' in {1}, because:{2}, raw: {3} +%s\ is\ unreachable\ from\ %s,\ because\:%s = {0} is unreachable from {1}, because:{2} +failed\ to\ check\ is\ %s\ reachable\ from\ %s,\ because\:%s = failed to check is {0} reachable from {1}, because:{2} +%s\ failed\ to\ check\ ceph\ health\ status,\ because\:\ %s = {0} failed to check ceph health status, because: {1} +failed\ to\ get\ chrony\ sources,\ because\:%s = failed to get chrony sources, because:{0} +failed\ to\ get\ %s's\ chrony\ sources,\ because\:%s = failed to get {0}''s chrony sources, because:{1} +failed\ to\ synchronize\ chrony\ server\ in\ %s,\ because\:%s,\ raw\:\ %s = failed to synchronize chrony server in {0}, because:{1}, raw: {2} +fail\ to\ check\ is\ %s\ reachable\ from\ host\ %s,\ because\ %s\ is\ not\ managed\ by\ us = fail to check is {0} reachable from host {1}, because {2} is not managed by us +fail\ to\ delete\ old\ chrony\ server\ in\ zstack.properties\ in\ %s,\ because\:%s = fail to delete old chrony server in zstack.properties in {0}, because:{1} +fail\ to\ config\ chrony\ %s\ server\ in\ zstack.properties\ in\ %s,\ because\:%s = fail to config chrony {0} server in zstack.properties in {1}, because:{2} +%s\ is\ not\ a\ valid\ ip\ address = {0} is not a valid ip address +internal\ and\ external\ chrony\ servers\ cannot\ be\ null\ at\ the\ same\ time = internal and external chrony servers cannot be null at the same time +%s\ is\ not\ a\ valid\ ip\ address\ or\ domain\ name = {0} is not a valid ip address or domain name +%s\ cannot\ be\ set\ as\ external\ chrony\ server! = {0} cannot be set as external chrony server! +%s\ is\ unreachable\ from\ %s = {0} is unreachable from {1} +ZStone\ not\ support\ update\ chrony\ server\ online\ yet! = ZStone not support update chrony server online yet! 
+ceph\ status\ is\ unhealthy,\ please\ check\ your\ environment\ first!\ %s = ceph status is unhealthy, please check your environment first! {0} + +# In Module: vxlan +cannot\ configure\ vxlan\ network\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] = cannot configure vxlan network for vm[uuid:{0}] on the destination host[uuid:{1}] +cannot\ allocate\ vni[%s]\ in\ l2Network[uuid\:%s],\ out\ of\ vni\ range = cannot allocate vni[{0}] in l2Network[uuid:{1}], out of vni range +cannot\ allocate\ vni[%s]\ in\ l2Network[uuid\:%s],\ duplicate\ with\ l2Network[uuid\:%s] = cannot allocate vni[{0}] in l2Network[uuid:{1}], duplicate with l2Network[uuid:{2}] +find\ multiple\ vtep\ ips[%s]\ for\ one\ host[uuid\:%s],\ need\ to\ delete\ host\ and\ add\ again = find multiple vtep ips[{0}] for one host[uuid:{1}], need to delete host and add again +failed\ to\ find\ vtep\ on\ host[uuid\:\ %s],\ please\ re-attach\ vxlanpool[uuid\:\ %s]\ to\ cluster. = failed to find vtep on host[uuid: {0}], please re-attach vxlanpool[uuid: {1}] to cluster. 
+failed\ to\ create\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vni\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to create bridge[{0}] for l2Network[uuid:{1}, type:{2}, vni:{3}] on kvm host[uuid:{4}], because {5} +failed\ to\ check\ cidr[%s]\ for\ l2VxlanNetwork[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = failed to check cidr[{0}] for l2VxlanNetwork[uuid:{1}, name:{2}] on kvm host[uuid:{3}], {4} +failed\ to\ delete\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vni\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to delete bridge[{0}] for l2Network[uuid:{1}, type:{2}, vni:{3}] on kvm host[uuid:{4}], because {5} +failed\ to\ check\ cidr[%s]\ for\ l2VxlanNetworkPool[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = failed to check cidr[{0}] for l2VxlanNetworkPool[uuid:{1}, name:{2}] on kvm host[uuid:{3}], {4} +failed\ to\ realize\ vxlan\ network\ pool[uuid\:%s,\ type\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to realize vxlan network pool[uuid:{0}, type:{1}] on kvm host[uuid:{2}], because {3} +vni[%s]\ for\ vxlan\ is\ invalid = vni[{0}] for vxlan is invalid +cannot\ allocate\ vni[%s]\ in\ vxlan\ network[uuid\:%s]\ which\ is\ already\ allocated = cannot allocate vni[{0}] in vxlan network[uuid:{1}] which is already allocated +need\ to\ input\ one\ system\ tag\ like\ \:\ [%s] = need to input one system tag like : [{0}] +wrong\ system\ tag\ [%s],\ should\ be\ like\ \:\ [%s] = wrong system tag [{0}], should be like : [{1}] +wrong\ cidr\ format\ in\ system\ tag\ [%s] = wrong cidr format in system tag [{0}] +overlap\ vni\ range\ with\ %s\ [%s] = overlap vni range with {0} [{1}] +vxlan\ network\ pool\ doesn't\ support\ create\ l3\ network = vxlan network pool doesn''t support create l3 network +Vni\ allocator\ strategy[%s]\ returns\ nothing,\ because\ no\ vni\ is\ available\ in\ this\ VxlanNetwork[name\:%s,\ uuid\:%s] = Vni allocator strategy[{0}] returns nothing, because no vni is available in this VxlanNetwork[name:{1}, uuid:{2}] 
+Cannot\ find\ L2NetworkClusterRefVO\ item\ for\ l2NetworkUuid[%s]\ clusterUuid[%s] = Cannot find L2NetworkClusterRefVO item for l2NetworkUuid[{0}] clusterUuid[{1}] +ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ exist = ip[{0}] l2NetworkUuid[{1}] clusterUuid[{2}] exist +ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ ip\ exist\ in\ local\ vtep = ip[{0}] l2NetworkUuid[{1}] clusterUuid[{2}] ip exist in local vtep +%s\:is\ not\ ipv4 = {0}:is not ipv4 +vxlan\ vtep\ address\ for\ host\ [uuid\ \:\ %s]\ and\ pool\ [uuid\ \:\ %s]\ pair\ already\ existed = vxlan vtep address for host [uuid : {0}] and pool [uuid : {1}] pair already existed + +# In Module: zsv +cannot\ delete\ disaster\ recovery\ license = cannot delete disaster recovery license +invalid\ ip\ format\ [%s] = invalid ip format [{0}] +volume\ %s\ still\ have\ snapshot\ group\ on\ vm\ %s,\ cannot\ attach\ to\ other\ vm = volume {0} still have snapshot group on vm {1}, cannot attach to other vm +volume\ %s\ still\ have\ snapshot\ group,\ cannot\ delete\ it = volume {0} still have snapshot group, cannot delete it +detach\ sharable\ volume\ or\ lun\ device\ before\ operating\ snapshot\ group = detach sharable volume or lun device before operating snapshot group +failed\ to\ find\ ZSV\ additional\ license\ info\:\ %s = failed to find ZSV additional license info: {0} +Failed\ to\ check\ SSH\ keys\ on\ host[%s\:%d] = Failed to check SSH keys on host[{0}:{1}] +SSH\ keys\ are\ incomplete\ on\ host[%s\:%d]. = SSH keys are incomplete on host[{0}:{1}]. 
+Failed\ to\ generate\ SSH\ keys\ on\ host[%s\:%d] = Failed to generate SSH keys on host[{0}:{1}] +failed\ to\ check\ if\ management\ node\ is\ also\ compute\ node,\ node\:\ %s = failed to check if management node is also compute node, node: {0} +management\ node[uuid\:%s]\ must\ be\ a\ compute\ node = management node[uuid:{0}] must be a compute node +failed\ to\ retrieve\ host\ uuid\ [management\ uuid\:%s] = failed to retrieve host uuid [management uuid:{0}] +failed\ to\ check\ if\ management\ node\ is\ also\ compute\ node = failed to check if management node is also compute node # In Module: volumebackup bandWidth\ must\ be\ a\ positive\ number = bandWidth must be a positive number @@ -3644,6 +4484,7 @@ original\ volume[uuid\:%s]\ for\ backup[uuid\:%s]\ is\ no\ longer\ attached\ to\ VM\ not\ found\ with\ volume\ backup[uuid\:%s] = VM not found with volume backup[uuid:{0}] VM\ is\ not\ in\ stopped\ state\:\ %s = VM is not in stopped state: {0} No\ available\ backup\ storage\ found = No available backup storage found +The\ vm\ is\ creating\ a\ backup\ job,\ cannot\ enable\ the\ cdp\ task\ at\ the\ same\ time. = The vm is creating a backup job, cannot enable the cdp task at the same time. 
cannot\ find\ volume\ backup[uuid\:%s] = cannot find volume backup[uuid:{0}] the\ cluster\ of\ vm[%s]\ is\ not\ in\ the\ same\ cluster\ as\ the\ primaryStorage[%s] = the cluster of vm[{0}] is not in the same cluster as the primaryStorage[{1}] Operation\ not\ supported\ on\ shared\ volume = Operation not supported on shared volume @@ -3664,6 +4505,7 @@ Volume\ backup[uuid\:%s]\ not\ found\ on\ any\ backup\ storage = Volume backup[u degree\ [%s]\ should\ be\ a\ positive\ number = degree [{0}] should be a positive number invalid\ type[%s],\ should\ be\ [nfs,\ sshfs,\ nbd] = invalid type[{0}], should be [nfs, sshfs, nbd] invalid\ url[%s],\ should\ be\ hostname\:/path = invalid url[{0}], should be hostname:/path +generate\ volume\ backup\ metadata\ file\ on\ image\ store[uuid\:%s]\ failure,\ because\ IO\ error\:\ %s = generate volume backup metadata file on image store[uuid:{0}] failure, because IO error: {1} volume\ backup\ metadata\ operation\ failure,\ because\ %s = volume backup metadata operation failure, because {0} # In Module: vpc @@ -3677,6 +4519,7 @@ L3Network\ [uuid\:\ %s]\ has\ not\ been\ attached\ to\ vpc\ router = L3Network [ all\ networks\ in\ same\ IPsecConnection\ must\ be\ attached\ to\ same\ VPC\ router = all networks in same IPsecConnection must be attached to same VPC router there\ is\ no\ master\ vpc\ for\ ha\ group\ %s = there is no master vpc for ha group {0} there\ is\ a\ vpc[%s]\ using\ old\ ipsec\ plugin,\ upgrade\ it\ to\ create\ ipsec = there is a vpc[{0}] using old ipsec plugin, upgrade it to create ipsec +there\ already\ have\ ipsec\ connection[uuid\:%s,\ name\:%s]\ with\ the\ same\ vrouter\ and\ peerAddress = there already have ipsec connection[uuid:{0}, name:{1}] with the same vrouter and peerAddress the\ vip[uuid\:%s]\ has\ been\ used\ for\ %s = the vip[uuid:{0}] has been used for {1} the\ peerAddress[%s]\ cannot\ be\ the\ same\ to\ the\ VIP\ address = the peerAddress[{0}] cannot be the same to the VIP address the\ peerAddress[%s]\ is\ 
not\ an\ IPv4\ address = the peerAddress[{0}] is not an IPv4 address @@ -3709,6 +4552,8 @@ default\ route\ network\ can\ not\ be\ detached = default route network can not original\ public\ network\ can\ not\ be\ detached = original public network can not be detached could\ not\ detach\ l3\ network\ to\ vpc\ router[uuid\:%s]\ because\ its\ state\ is\ not\ running\ or\ stopped = could not detach l3 network to vpc router[uuid:{0}] because its state is not running or stopped could\ not\ detach\ l3\ network\ to\ vpc\ router[uuid\:%s]\ becaus\ the\ states\ of\ the\ master\ and\ slave\ are\ inconsistent = could not detach l3 network to vpc router[uuid:{0}] because the states of the master and slave are inconsistent +l3\ network[uuid\:%s]\ can\ not\ detach\ from\ vpc\ vrouter[uuid\:%s]\ since\ network\ services\ attached\ vips[%s]\ still\ used\ in\ l3 = l3 network[uuid:{0}] can not detach from vpc vrouter[uuid:{1}] since network services attached vips[{2}] still used in l3 +vpc\ l3\ network[uuid\:%s]\ can\ not\ detach\ from\ vpc\ vrouter[uuid\:%s]\ since\ vm\ nics[%s]\ still\ used\ in\ l3 = vpc l3 network[uuid:{0}] can not detach from vpc vrouter[uuid:{1}] since vm nics[{2}] still used in l3 virtual\ router\ offering[uuid\:\ %s]\ is\ not\ enabled = virtual router offering[uuid: {0}] is not enabled only\ vpc\ l3\ network\ can\ attach\ to\ vpc\ vrouter = only vpc l3 network can attach to vpc vrouter Vpc\ network\ [uuid\:%s]\ already\ attached\ to\ vpc\ router\ [uuid\:%s] = Vpc network [uuid:{0}] already attached to vpc router [uuid:{1}] @@ -3717,6 +4562,8 @@ could\ not\ attached\ l3\ network\ to\ vpc\ router[uuid\:%s]\ because\ its\ stat could\ not\ attached\ l3\ network\ to\ vpc\ router[uuid\:%s]\ because\ both\ its\ state\ and\ it\ peer\ state\ is\ not\ running\ or\ stopped = could not attached l3 network to vpc router[uuid:{0}] because both its state and it peer state is not running or stopped public\ network[uuid\:\ %s]\ vip[uuid\:\ %s,\ ip\:\ %s]\ peer\ with\ 
l3network[uuid\:\ %s]\ not\ on\ vpc\ vr[uuid\:\ %s] = public network[uuid: {0}] vip[uuid: {1}, ip: {2}] peer with l3network[uuid: {3}] not on vpc vr[uuid: {4}] the\ gateway[ip\:%s]\ of\ l3[uuid\:%s]\ has\ been\ occupied = the gateway[ip:{0}] of l3[uuid:{1}] has been occupied +the\ static\ ip[%s]\ specified\ in\ message\ not\ equals\ to\ gateway\ ips[%s]\ of\ l3\ network[uuid\:%s] = the static ip[{0}] specified in message not equals to gateway ips[{1}] of l3 network[uuid:{2}] +l3\ network\ [uuid\:%s]\ must\ be\ attached\ first,\ because\ there\ is\ vip\ on\ that\ l3\ network = l3 network [uuid:{0}] must be attached first, because there is vip on that l3 network dns\ address\ [%s]\ is\ not\ added\ to\ vpc\ router\ [uuid\:%s] = dns address [{0}] is not added to vpc router [uuid:{1}] could\ not\ add\ ip\ range\ to\ l3\ network[uuid\:%s],\ because\ it's\ overlap\ with\ cidr\ [%s]\ of\ vRouter\ [uuid\:%s] = could not add ip range to l3 network[uuid:{0}], because it''s overlap with cidr [{1}] of vRouter [uuid:{2}] could\ not\ add\ ipv6\ range\ to\ l3\ network[uuid\:%s],\ because\ it's\ overlap\ with\ cidr\ [%s]\ of\ vRouter\ [uuid\:%s] = could not add ipv6 range to l3 network[uuid:{0}], because it''s overlap with cidr [{1}] of vRouter [uuid:{2}] @@ -3726,11 +4573,13 @@ not\ support\ to\ get\ the\ service\ %s\ state\ to\ virtual\ router\ %s = not su can\ not\ get\ state\ of\ distributed\ routing\ to\ virtual\ router\ %s = can not get state of distributed routing to virtual router {0} not\ support\ to\ update\ the\ service\ %s\ state\ to\ virtual\ router\ %s = not support to update the service {0} state to virtual router {1} virtual\ router\ offering[uuid\:%s,\ name\:%s]\ doesn't\ have\ a\ public\ network = virtual router offering[uuid:{0}, name:{1}] doesn''t have a public network +vpc\ l3\ network\ must\ attach\ a\ vpc\ vrouter\ first\ before\ do\ anything\ related\ to\ vrouter(like\ start/stop\ vm,\ create\ lb,\ etc.) 
= vpc l3 network must attach a vpc vrouter first before do anything related to vrouter(like start/stop vm, create lb, etc.) dns\ address\ [%s]\ has\ bean\ added\ to\ vpc\ router\ [uuid\:%s] = dns address [{0}] has been added to vpc router [uuid:{1}] can\ not\ detach\ nic\ from\ vpc\ vr[uuid\:%s] = can not detach nic from vpc vr[uuid:{0}] there\ is\ no\ ip\ range\ for\ l3\ network[uuid\:%s] = there is no ip range for l3 network[uuid:{0}] the\ gateway[ip\:%s]\ of\ l3[uuid\:%s]\ has\ been\ occupied\ on\ vpc\ vr[uuid\:\ %s] = the gateway[ip:{0}] of l3[uuid:{1}] has been occupied on vpc vr[uuid: {2}] unable\ to\ ssh\ in\ to\ the\ vpc\ router[%s],\ the\ ssh\ port\ seems\ not\ open = unable to ssh in to the vpc router[{0}], the ssh port seems not open +the\ SSH\ port\ is\ not\ open\ after\ %s\ seconds.\ Failed\ to\ login\ the\ vpc\ router[ip\:%s] = the SSH port is not open after {0} seconds. Failed to login the vpc router[ip:{1}] Could\ not\ update\ this\ network\ service,\ due\ to\ vpc\ [uuid\:%s]\ is\ not\ support\ update\ network\ service\ version = Could not update this network service, due to vpc [uuid:{0}] is not support update network service version Could\ not\ update\ this\ network\ service,\ due\ to\ vpc\ [uuid\:%s]\ used\ old\ kernel\ version\:[%s] = Could not update this network service, due to vpc [uuid:{0}] used old kernel version:[{1}] Could\ not\ apply\ snat\ with\ non-default\ public\ network,\ due\ to\ multi\ snat\ feature\ is\ disabled = Could not apply snat with non-default public network, due to multi snat feature is disabled @@ -3740,9 +4589,11 @@ invalid\ monitor\ ip\ address\ [%s] = invalid monitor ip address [{0}]
ha group management l3 and public l3 networks[uuid:{0}] are different from offering l3 networks [uuid:{1}] +vpc\ router\ l3\ networks\ [uuid\:%s]\ are\ different\ from\ ha\ group\ l3\ networks\ [uuid\:%s],\ !!!\ please\ delete\ this\ router\ and\ recreate\ it = vpc router l3 networks [uuid:{0}] are different from ha group l3 networks [uuid:{1}], !!! please delete this router and recreate it vpc\ router\ has\ been\ attached\ to\ ha\ group\ [uuid\:%s] = vpc router has been attached to ha group [uuid:{0}] vpc\ ha\ group\ [uuid\:%s]\ is\ not\ existed = vpc ha group [uuid:{0}] is not existed there\ are\ more\ than\ 1\ vpc\ routers\ attached\ to\ haGroup\ [uuid\:%s] = there are more than 1 vpc routers attached to haGroup [uuid:{0}] +vpc\ router\ [uuid\:%s]\ can\ not\ be\ upgraded\ to\ ha\ router\ because\ it\ public\ network\ is\ same\ to\ management\ network = vpc router [uuid:{0}] can not be upgraded to ha router because it public network is same to management network create\ affinityGroup\ for\ ha\ group\ [uuid\:%s]\ failed = create affinityGroup for ha group [uuid:{0}] failed virtualrouter\ %s\ [uuid\:\ %s\ ]\ of\ VPC\ HA\ group\ %s\ [uuid\:\ %s]\ haStatus\ changed\ from\ %s\ to\ %s = virtualrouter {0} [uuid: {1} ] of VPC HA group {2} [uuid: {3}] haStatus changed from {4} to {5} ha\ group\ uuid\ nil = ha group uuid nil @@ -3750,113 +4601,20 @@ VR[uuid\:\ %s]\ not\ running = VR[uuid: {0}] not running VR[uuid\:\ %s]\ not\ connected = VR[uuid: {0}] not connected failed\ to\ enable\ ha\ on\ virtual\ router[uuid\:%s],\ %s = failed to enable ha on virtual router[uuid:{0}], {1} -# In Module: vpcFirewall -can\ not\ detach\ system\ default\ ruleSet = can not detach system default ruleSet -only\ system\ ruleSet\ can\ change\ action\ type = only system ruleSet can change action type -can\ not\ delete\ system\ default\ ruleSet = can not delete system default ruleSet -can\ not\ delete\ system\ default\ rule = can not delete system default rule -the\ router\ [uuid\:%s]\ does\ not\ 
has\ a\ master\ router = the router [uuid:{0}] does not has a master router -the\ VPC\ Router[uuid\:%s]\ already\ has\ a\ firewall. = the VPC Router[uuid:{0}] already has a firewall. -already\ has\ a\ rule\ template\ with\ name\ %s = already has a rule template with name {0} -the\ ruleSet[%s]\ already\ has\ a\ rule\ with\ rule\ number\ %s. = the ruleSet[{0}] already has a rule with rule number {1}. -can\ not\ update\ default\ rule[%s] = can not update default rule[{0}] -only\ tcp\ protocol\ can\ use\ tcp\ flag = only tcp protocol can use tcp flag -only\ icmp\ protocol\ can\ use\ icmp\ type = only icmp protocol can use icmp type -the\ rule\ [%s]\ number\ is\ invalid = the rule [{0}] number is invalid -can\ not\ attach\ the\ default\ ruleSet\ to\ other\ nic = can not attach the default ruleSet to other nic -ruleSet[%s]\ already\ has\ a\ l3[%s] = ruleSet[{0}] already has a l3[{1}] -already\ has\ a\ rule\ with\ the\ number[%s] = already has a rule with the number[{0}] -the\ ruleSet[%s]\ already\ has\ a\ rule\ with\ the\ rule\ number\ %s. = the ruleSet[{0}] already has a rule with the rule number {1}. 
-could\ not\ add\ firewall\ rule[%d]\ only\ tcp\ or\ udp\ protocol\ can\ use\ port = could not add firewall rule[{0}] only tcp or udp protocol can use port -could\ not\ add\ firewall\ rule[%d]\ only\ tcp\ protocol\ can\ use\ tcp\ flag = could not add firewall rule[{0}] only tcp protocol can use tcp flag -could\ not\ add\ firewall\ rule[%d]\ because\ only\ icmp\ protocol\ can\ use\ icmp\ type = could not add firewall rule[{0}] because only icmp protocol can use icmp type -could\ not\ add\ firewall\ rule[%d]\ because\ only\ tcp\ or\ udp\ protocol\ can\ use\ port = could not add firewall rule[{0}] because only tcp or udp protocol can use port -could\ not\ add\ firewall\ rule[%d]\ because\ only\ tcp\ protocol\ can\ use\ tcp\ flag = could not add firewall rule[{0}] because only tcp protocol can use tcp flag -could\ not\ add\ firewall\ rule[%d]\ because\ %s = could not add firewall rule[{0}] because {1} -could\ not\ add\ firewall\ rule,\ because\ ruleNo\ %d\ is\ invalid = could not add firewall rule, because ruleNo {0} is invalid -could\ not\ add\ firewall\ rule,\ because\ there\ is\ no\ action\ for\ ruleNo\:%d = could not add firewall rule, because there is no action for ruleNo:{0} -could\ not\ add\ firewall\ rule,\ because\ source\ IP\ length\:\ %s\ is\ not\ valid\ for\ ruleNo\:%d = could not add firewall rule, because source IP length: {0} is not valid for ruleNo:{1} -could\ not\ add\ firewall\ rule,\ because\ destination\ IP\ length\:\ %s\ is\ not\ valid\ for\ ruleNo\:%d = could not add firewall rule, because destination IP length: {0} is not valid for ruleNo:{1} -could\ not\ add\ firewall\ rule,\ because\ there\ is\ no\ state\ for\ ruleNo\:%d = could not add firewall rule, because there is no state for ruleNo:{0} -could\ not\ add\ firewall\ rule,\ because\ description\ length\ %s\ is\ not\ valid\ for\ ruleNo\:%d = could not add firewall rule, because description length {0} is not valid for ruleNo:{1} -the\ configuration\ file\ has\ format\ error = the configuration 
file has format error -the\ firewall\ rules\ in\ the\ configuration\ file\ have\ syntax\ errors\:\ %s = the firewall rules in the configuration file have syntax errors: {0} -sync\ firewall\ config\ failed,because\ %s = sync firewall config failed,because {0} -update\ firewall\ ruleSet\ action\ failed,\ because\ %s = update firewall ruleSet action failed, because {0} -Can\ not\ find\ l3[%]\ related\ mac\ on\ vRouter[%s] = Can not find l3[%] related mac on vRouter[{0}] -create\ firewall\ rule[%s]\ failed,\ because\ %s = create firewall rule[{0}] failed, because {1} -delete\ firewall\ on\ vRouter[%s],because\ %s = delete firewall on vRouter[{0}],because {1} -create\ firewall\ ruleSet[%s]\ failed,\ because\ %s = create firewall ruleSet[{0}] failed, because {1} -delete\ firewall\ rule\ failed\ on\ vRouter[%s],\ because\ %s = delete firewall rule failed on vRouter[{0}], because {1} -change\ firewall\ rule\ state\ on\ vRouter[%s]\ failed,\ because\ %s = change firewall rule state on vRouter[{0}] failed, because {1} -attach\ firewall\ ruleSet[%s]\ failed,\ because\ %s = attach firewall ruleSet[{0}] failed, because {1} -detach\ ruleSet\ failed,\ maybe\ it\ has\ been\ deleted = detach ruleSet failed, maybe it has been deleted -detach\ firewall\ ruleSet[%s]\ failed,because\ %s = detach firewall ruleSet[{0}] failed,because {1} -cannot\ find\ vpcFirewall[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find vpcFirewall[uuid:{0}], it may have been deleted -cannot\ find\ vpcFirewallRuleSet[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find vpcFirewallRuleSet[uuid:{0}], it may have been deleted -cannot\ find\ vpcFirewallIpSetTemplate[uuid\:%s],\ it\ may\ have\ been\ deleted = cannot find vpcFirewallIpSetTemplate[uuid:{0}], it may have been deleted -attach\ firewall\ ruleSet[%s]\ to\ l3[%s]\ failed,because\ %s = attach firewall ruleSet[{0}] to l3[{1}] failed,because {2} -detach\ firewall\ ruleSet\ from\ l3[%s]\ failed,because\ %s = detach firewall ruleSet from l3[{0}] 
failed,because {1} -find\ duplicate\ rule\ numbers\ %s\ on\ firewall[%s],l3[%s],forward[%s] = find duplicate rule numbers {0} on firewall[{1}],l3[{2}],forward[{3}] -no\ changes\ in\ ruleset\ %s = no changes in ruleset {0} -firewall\ %s\ related\ vpc\ not\ in\ running\ state = firewall {0} related vpc not in running state -can\ not\ delete\ ruleSet[%s]\ because\ it\ still\ attached\ to\ nic = can not delete ruleSet[{0}] because it still attached to nic -default\ ruleset\ %s\ can\ only\ attached\ to\ one\ interface\ forward,\ but\ find\ %s\ related\ interface = default ruleset {0} can only attached to one interface forward, but find {1} related interface -cannot\ find\ vpcFirewall[uuid\:%s]\ related\ vRouter = cannot find vpcFirewall[uuid:{0}] related vRouter - -# In Module: vxlan -cannot\ configure\ vxlan\ network\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] = cannot configure vxlan network for vm[uuid:{0}] on the destination host[uuid:{1}] -cannot\ allocate\ vni[%s]\ in\ l2Network[uuid\:%s],\ out\ of\ vni\ range = cannot allocate vni[{0}] in l2Network[uuid:{1}], out of vni range -cannot\ allocate\ vni[%s]\ in\ l2Network[uuid\:%s],\ duplicate\ with\ l2Network[uuid\:%s] = cannot allocate vni[{0}] in l2Network[uuid:{1}], duplicate with l2Network[uuid:{2}] -find\ multiple\ vtep\ ips[%s]\ for\ one\ host[uuid\:%s],\ need\ to\ delete\ host\ and\ add\ again = find multiple vtep ips[{0}] for one host[uuid:{1}], need to delete host and add again -failed\ to\ find\ vtep\ on\ host[uuid\:\ %s],\ please\ re-attach\ vxlanpool[uuid\:\ %s]\ to\ cluster. = failed to find vtep on host[uuid: {0}], please re-attach vxlanpool[uuid: {1}] to cluster. 
-failed\ to\ create\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vni\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to create bridge[{0}] for l2Network[uuid:{1}, type:{2}, vni:{3}] on kvm host[uuid:{4}], because {5} -failed\ to\ check\ cidr[%s]\ for\ l2VxlanNetwork[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = failed to check cidr[{0}] for l2VxlanNetwork[uuid:{1}, name:{2}] on kvm host[uuid:{3}], {4} -failed\ to\ delete\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vni\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to delete bridge[{0}] for l2Network[uuid:{1}, type:{2}, vni:{3}] on kvm host[uuid:{4}], because {5} -failed\ to\ check\ cidr[%s]\ for\ l2VxlanNetworkPool[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = failed to check cidr[{0}] for l2VxlanNetworkPool[uuid:{1}, name:{2}] on kvm host[uuid:{3}], {4} -failed\ to\ realize\ vxlan\ network\ pool[uuid\:%s,\ type\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = failed to realize vxlan network pool[uuid:{0}, type:{1}] on kvm host[uuid:{2}], because {3} -vni[%s]\ for\ vxlan\ is\ invalid = vni[{0}] for vxlan is invalid -cannot\ allocate\ vni[%s]\ in\ vxlan\ network[uuid\:%s]\ which\ is\ already\ allocated = cannot allocate vni[{0}] in vxlan network[uuid:{1}] which is already allocated -need\ to\ input\ one\ system\ tag\ like\ \:\ [%s] = need to input one system tag like : [{0}] -wrong\ system\ tag\ [%s],\ should\ be\ like\ \:\ [%s] = wrong system tag [{0}], should be like : [{1}] -wrong\ cidr\ format\ in\ system\ tag\ [%s] = wrong cidr format in system tag [{0}] -overlap\ vni\ range\ with\ %s\ [%s] = overlap vni range with {0} [{1}] -vxlan\ network\ pool\ doesn't\ support\ create\ l3\ network = vxlan network pool doesn''t support create l3 network -Vni\ allocator\ strategy[%s]\ returns\ nothing,\ because\ no\ vni\ is\ available\ in\ this\ VxlanNetwork[name\:%s,\ uuid\:%s] = Vni allocator strategy[{0}] returns nothing, because no vni is available in this VxlanNetwork[name:{1}, uuid:{2}] 
-Cannot\ find\ L2NetworkClusterRefVO\ item\ for\ l2NetworkUuid[%s]\ clusterUuid[%s] = Cannot find L2NetworkClusterRefVO item for l2NetworkUuid[{0}] clusterUuid[{1}] -ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ exist = ip[{0}] l2NetworkUuid[{1}] clusterUuid[{2}] exist -ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ ip\ exist\ in\ local\ vtep = ip[{0}] l2NetworkUuid[{1}] clusterUuid[{2}] ip exist in local vtep -%s\:is\ not\ ipv4 = {0}:is not ipv4 -vxlan\ vtep\ address\ for\ host\ [uuid\ \:\ %s]\ and\ pool\ [uuid\ \:\ %s]\ pair\ already\ existed = vxlan vtep address for host [uuid : {0}] and pool [uuid : {1}] pair already existed - -# In Module: woodpecker - -# In Module: xdragon -xdragon\ host\ not\ support\ create\ vm\ using\ an\ iso\ image. = xdragon host not support create vm using an iso image. - -# In Module: yunshan -the\ url\ is\ null,\ please\ config\ the\ YunShan\ NSP. = the url is null, please config the YunShan NSP. - # In Module: zbox +usb\ device[uuid\:%s]\ has\ been\ attached\ VM[uuid\:%s],\ cannot\ be\ add\ to\ zbox = usb device[uuid:{0}] has been attached to VM[uuid:{1}], cannot be added to zbox zbox[name\:%s]\ status\ is\ not\ Ready,\ current\ status\ is\ %s = zbox[name:{0}] status is not Ready, current status is {1} zbox[uuid\:%s]\ is\ still\ in\ use,\ cannot\ eject\ it = zbox[uuid:{0}] is still in use, cannot eject it zbox[uuid\:%s]\ is\ not\ Ready,\ cannot\ sync\ capacity. = zbox[uuid:{0}] is not Ready, cannot sync capacity. only\ file\ on\ zbox[mountPath\:%s]\ can\ be\ deleted.\ but\ pass\ [%s] = only file on zbox[mountPath:{0}] can be deleted. but pass [{1}] zbox[name\:%s]\ state\ is\ not\ Ready,\ current\ state\ is\ %s = zbox[name:{0}] state is not Ready, current state is {1} zbox[uuid\:\ %s]\ seems\ like\ removed = zbox[uuid: {0}] seems like removed - -# In Module: zboxbackup -please\ insert\ zbox\ to\ management\ node. = please insert zbox to management node. -some\ volume[uuids\:%s]\ recover\ failed.\ you\ can\ trigger\ it\ again\ by\ reconnect\ it.
= some volume[uuids:{0}] recover failed. you can trigger it again by reconnect it. -there\ is\ another\ external\ backup[uuid\:\ %s]\ recovering = there is another external backup[uuid: {0}] recovering -both\ hostUuids\ and\ backupStorageUuids\ are\ empty.\ you\ must\ specify\ one\ or\ both\ of\ them. = both hostUuids and backupStorageUuids are empty. you must specify one or both of them. -cannot\ find\ recover.conf\ under\ zbox\ backup\ install\ dir. = cannot find recover.conf under zbox backup install dir. -zbox\ should\ be\ inserted\ to\ a\ host\ first. = zbox should be inserted to a host first. -fail\ to\ backup\ database = fail to backup database +please\ attach\ zbox\ to\ %s[uuid\:%s]\ and\ resume\ job.\ if\ you\ do\ not\ want\ to\ continue,\ cancel\ it. = please attach zbox to {0}[uuid:{1}] and resume job. if you do not want to continue, cancel it. # In Module: zbs failed\ to\ SSH\ or\ zbs-tools\ was\ not\ installed\ in\ MDS[%s],\ you\ need\ to\ check\ the\ SSH\ configuration\ and\ dependencies = failed to SSH or zbs-tools was not installed in MDS[{0}], you need to check the SSH configuration and dependencies failed\ to\ get\ MDS[%s]\ metadata,\ you\ need\ to\ check\ the\ ZBS\ configuration = failed to get MDS[{0}] metadata, you need to check the ZBS configuration +unable\ to\ connect\ to\ the\ ZBS\ primary\ storage[uuid\:%s],\ failed\ to\ connect\ all\ MDS = unable to connect to the ZBS primary storage[uuid:{0}], failed to connect all MDS ZBS\ primary\ storage[uuid\:%s]\ may\ have\ been\ deleted = ZBS primary storage[uuid:{0}] may have been deleted cannot\ found\ kvm\ host[uuid\:%s],\ unable\ to\ deploy\ client = cannot found kvm host[uuid:{0}], unable to deploy client no\ MDS\ is\ Connected,\ the\ following\ MDS[%s]\ are\ not\ Connected. = no MDS is Connected, the following MDS[{0}] are not Connected. 
@@ -3868,20 +4626,107 @@ not\ found\ MDS[%s]\ of\ zbs\ primary\ storage[uuid\:%s]\ node = not found MDS[{ all\ MDS\ of\ ZBS\ primary\ storage[uuid\:%s]\ are\ not\ in\ Connected\ state = all MDS of ZBS primary storage[uuid:{0}] are not in Connected state all\ MDS\ cannot\ execute\ http\ call[%s] = all MDS cannot execute http call[{0}] -# In Module: zsv -cannot\ delete\ disaster\ recovery\ license = cannot delete disaster recovery license -invalid\ ip\ format\ [%s] = invalid ip format [{0}] -volume\ %s\ still\ have\ snapshot\ group\ on\ vm\ %s,\ cannot\ attach\ to\ other\ vm = volume {0} still have snapshot group on vm {1}, cannot attach to other vm -volume\ %s\ still\ have\ snapshot\ group,\ cannot\ delete\ it = volume {0} still have snapshot group, cannot delete it -detach\ sharable\ volume\ or\ lun\ device\ before\ operating\ snapshot\ group = detach sharable volume or lun device before operating snapshot group -failed\ to\ find\ ZSV\ additional\ license\ info\:\ %s = failed to find ZSV additional license info: {0} -Failed\ to\ check\ SSH\ keys\ on\ host[%s\:%d] = Failed to check SSH keys on host[{0}:{1}] -SSH\ keys\ are\ incomplete\ on\ host[%s\:%d]. = SSH keys are incomplete on host[{0}:{1}]. 
-Failed\ to\ generate\ SSH\ keys\ on\ host[%s\:%d] = Failed to generate SSH keys on host[{0}:{1}] -failed\ to\ check\ if\ management\ node\ is\ also\ compute\ node,\ node\:\ %s = failed to check if management node is also compute node, node: {0} -management\ node[uuid\:%s]\ must\ be\ a\ compute\ node = management node[uuid:{0}] must be a compute node -failed\ to\ retrieve\ host\ uuid\ [management\ uuid\:%s] = failed to retrieve host uuid [management uuid:{0}] -failed\ to\ check\ if\ management\ node\ is\ also\ compute\ node = failed to check if management node is also compute node +# In Module: zce-x-plugin +field[adminToken]\ of\ message[APIAddZceXMsg]\ is\ mandatory\ when\ ZSphere\ and\ ZCE-X\ management\ node\ is\ not\ on\ the\ same\ host = field[adminToken] of message[APIAddZceXMsg] is mandatory when ZSphere and ZCE-X management node is not on the same host +field[managementIp]\ and\ field[uuid]\ can\ not\ be\ null\ on\ the\ same\ time = field[managementIp] and field[uuid] can not be null on the same time +field[managementIp]\ and\ field[uuid]\ can\ not\ be\ set\ on\ the\ same\ time = field[managementIp] and field[uuid] can not be set on the same time +field[otherManagementIp]\ of\ message[APIAddZceXMsg]\ is\ invalid\:\ ZceXVO.managementIp\ and\ otherManagementIp\ must\ be\ different = field[otherManagementIp] of message[APIAddZceXMsg] is invalid: ZceXVO.managementIp and otherManagementIp must be different +field[otherStorageIp]\ of\ message[APIAddZceXMsg]\ is\ invalid\:\ ZceXVO.managementIp,\ otherManagementIp\ and\ otherStorageIp\ must\ be\ different = field[otherStorageIp] of message[APIAddZceXMsg] is invalid: ZceXVO.managementIp, otherManagementIp and otherStorageIp must be different +invalid\ package\ status.\ only\ Installed,\ InitializeFailed\ or\ Initialized\ packages\ can\ be\ initialized = invalid package status. 
only Installed, InitializeFailed or Initialized packages can be initialized +failed\ to\ find\ active\ ceph\ cluster = failed to find active ceph cluster +failed\ to\ connect\ to\ ZCE-X[%s] = failed to connect to ZCE-X[{0}] +access-token\ is\ invalid = access-token is invalid +no\ management\ node\ is\ available\ to\ communicate\ with\ ZCE-X\:\ xms-cli\ is\ missing = no management node is available to communicate with ZCE-X: xms-cli is missing +ZceXVO\ with\ management\ ip[%s]\ already\ exists = ZceXVO with management ip[{0}] already exists +failed\ to\ create\ alert\ platform\ from\ ZCE-X[uuid\=%s] = failed to create alert platform from ZCE-X[uuid={0}] +cannot\ find\ management\ node\ by\ managementIp\ %s = cannot find management node by managementIp {0} +SSH\ passwordless\ setup\ failed\:\ %s = SSH passwordless setup failed: {0} +failed\ to\ generate\ ZCE-X\ env\ config\ from\ template = failed to generate ZCE-X env config from template +failed\ to\ generate\ admin\ token = failed to generate admin token +no\ token\ found\ with\ zceX[uuid\:%s] = no token found with zceX[uuid:{0}] +install.sh\ not\ found\:\ %s.\ install.sh\ has\ been\ manually\ deleted.\ please\ re-upload\ the\ installation\ package = install.sh not found: {0}. install.sh has been manually deleted. please re-upload the installation package +failed\ to\ install\ distributed\ storage = failed to install distributed storage +cannot\ find\ management\ node\ %s = cannot find management node {0} +cleanup.sh\ not\ found\:\ %s.\ the\ cleanup.sh\ has\ been\ manually\ deleted. = cleanup.sh not found: {0}. the cleanup.sh has been manually deleted. 
+init\ config\ not\ found\ for\ software\ package\:\ %s = init config not found for software package: {0} +failed\ to\ generate\ conf = failed to generate conf +failed\ to\ cleanup\ env = failed to cleanup env +failed\ to\ uninstall\ distributed\ storage\ on\ hosts = failed to uninstall distributed storage on hosts +failed\ to\ get\ cluster\ from\ ZCE-X = failed to get cluster from ZCE-X +no\ cluster\ exists\ in\ ZCE-X = no cluster exists in ZCE-X +failed\ to\ get\ licenses\ from\ ZCE-X = failed to get licenses from ZCE-X +failed\ to\ get\ hosts\ from\ ZCE-X = failed to get hosts from ZCE-X +failed\ to\ get\ pools\ from\ ZCE-X = failed to get pools from ZCE-X +failed\ to\ get\ version\ from\ ZCE-X = failed to get version from ZCE-X +failed\ to\ get\ users\ from\ ZCE-X = failed to get users from ZCE-X +invalid\ token\ for\ ZCE-X\ server = invalid token for ZCE-X server +failed\ to\ get\ ZCE-X\ version\:\ missing\ version\ file = failed to get ZCE-X version: missing version file +failed\ to\ get\ ZCE-X\ version\:\ read\ error = failed to get ZCE-X version: read error +failed\ to\ update\ expiration\ time\ with\ ZCE-X\ configuration\ file[%s] = failed to update expiration time with ZCE-X configuration file[{0}] +xms-cli\ env\ config\ file[%s]\ already\ exists.\ You\ must\ check\ and\ manually\ delete\ this\ file\ and\ trying\ again = xms-cli env config file[{0}] already exists. 
You must check and manually delete this file and trying again +failed\ to\ write\ xms-cli\ env\ config\ file\ to\ temp\ file = failed to write xms-cli env config file to temp file +failed\ to\ write\ ZCE-X\ cluster\ config\ file = failed to write ZCE-X cluster config file +xms-cli\ temporary\ admin\ token\ does\ not\ exist = xms-cli temporary admin token does not exist +failed\ to\ read\ xms-cli\ temporary\ admin\ token\:\ %s = failed to read xms-cli temporary admin token: {0} +failed\ to\ install\ ZCE-X\ cluster\ by\ %s = failed to install ZCE-X cluster by {0} +xms-cli\ does\ not\ exist = xms-cli does not exist +admin_token\ already\ exists.\ You\ must\ confirm\ that\ the\ current\ admin_token\ is\ no\ longer\ in\ use,\ delete\ the\ token\ by\ command\ 'xms-cli\ access-token\ delete',\ and\ try\ again = admin_token already exists. You must confirm that the current admin_token is no longer in use, delete the token by command ''xms-cli access-token delete'', and try again +failed\ to\ login\ ZCE-X\ when\ creating\ access\ token = failed to login ZCE-X when creating access token +failed\ to\ create\ ZCE-X\ access\ token = failed to create ZCE-X access token +xms-cli\ returns\ invalid\ access_token = xms-cli returns invalid access_token +failed\ to\ get\ license\ content = failed to get license content +failed\ to\ read\ license\ bytes\:\ %s = failed to read license bytes: {0} +failed\ to\ extract\ ZCE-X\ license\ content\ from\ tarball = failed to extract ZCE-X license content from tarball +No\ enc\ license\ file\ (for\ ZCE-X\ server)\ found\ in\ tarball.\ Skip\ updating\ license\ and\ continue = No enc license file (for ZCE-X server) found in tarball. 
Skip updating license and continue +failed\ to\ write\ zce-x-license.tar.gz\:\ %s = failed to write zce-x-license.tar.gz: {0} +failed\ to\ login\ ZCE-X\ when\ updating\ license = failed to login ZCE-X when updating license +failed\ to\ update\ ZCE-X\ license = failed to update ZCE-X license +failed\ to\ login\ ZCE-X\ when\ getting\ license\ content = failed to login ZCE-X when getting license content +failed\ to\ list\ ZCE-X\ clusters = failed to list ZCE-X clusters +access\ token\ is\ empty = access token is empty +failed\ to\ create\ template\ file\:\ %s = failed to create template file: {0} +failed\ to\ get\ ZCE-X\ version = failed to get ZCE-X version +ZCE-X\ third\ party\ alert\ platform\ is\ already\ existing = ZCE-X third party alert platform is already existing +ZCE-X\ token\ is\ not\ existing = ZCE-X token is not existing +ZCE-X\ Storage = ZCE-X Storage +failed\ to\ get\ ZCE-X\ license\:\ no\ ZCE-X\ found\ with\ uuid[%s] = failed to get ZCE-X license: no ZCE-X found with uuid[{0}] +more\ than\ one\ ZCE-X\ found.\ You\ should\ specify\ the\ target\ ZCE-X\ for\ uploading\ the\ license\ by\ field\ monitorIp = more than one ZCE-X found. 
You should specify the target ZCE-X for uploading the license by field monitorIp +only\ one\ ZCE-X\ can\ be\ specified\ for\ uploading\ the\ license\ by\ field\ monitorIp = only one ZCE-X can be specified for uploading the license by field monitorIp +failed\ to\ get\ ZCE-X\ license\:\ no\ ZCE-X\ found\ with\ ip[%s] = failed to get ZCE-X license: no ZCE-X found with ip[{0}] +failed\ to\ get\ ZCE-X\ license\:\ no\ token = failed to get ZCE-X license: no token + +# In Module: zstone-plugin +field[hosts]\ must\ not\ be\ null\ or\ empty = field[hosts] must not be null or empty +at\ least\ 2\ hosts\ are\ required\ for\ ZStone\ initialization = at least 2 hosts are required for ZStone initialization +field[uuid]\ in\ hosts\ must\ not\ be\ null\ or\ empty = field[uuid] in hosts must not be null or empty +duplicated\ host\ uuid(s)\ found\:\ %s = duplicated host uuid(s) found: {0} +field[uuid]\ of\ message[APIUpdateZStoneHostConfigMsg].hosts\ are\ invalid\:\ %s = field[uuid] of message[APIUpdateZStoneHostConfigMsg].hosts are invalid: {0} +field[publicIp]\ of\ message[APIUpdateZStoneHostConfigMsg].hosts\ is\ mandatory,\ can\ not\ be\ null = field[publicIp] of message[APIUpdateZStoneHostConfigMsg].hosts is mandatory, can not be null +failed\ to\ connect\ to\ zstone[%s] = failed to connect to zstone[{0}] +ZStone\ with\ management\ ip[%s]\ already\ exists = ZStone with management ip[{0}] already exists +ZStone[%s]\ has\ %s\ block\ clusters,\ we\ must\ ensure\ no\ other\ clusters\ in\ ZStone\ before\ add\ new\ cluster = ZStone[{0}] has {1} block clusters, we must ensure no other clusters in ZStone before add new cluster +all\ management\ nodes\ must\ have\ the\ same\ username\ and\ password = all management nodes must have the same username and password +all\ storage\ nodes\ must\ have\ the\ same\ username\ and\ password = all storage nodes must have the same username and password +config\ parameter\ cannot\ be\ null\ or\ empty = config parameter cannot be null or empty +install\ 
config\ not\ found\ for\ software\ package\:\ %s = install config not found for software package: {0} +failed\ to\ authorize\ in\ ZStone\ server = failed to authorize in ZStone server +failed\ to\ get\ licenses\ from\ ZStone = failed to get licenses from ZStone +failed\ to\ get\ licenses\ from\ ZStone\:\ %s = failed to get licenses from ZStone: {0} +failed\ to\ reload\ licenses\ from\ ZStone = failed to reload licenses from ZStone +failed\ to\ get\ clusters\ from\ ZStone\ 5.2.x\ /\ 5.3.x = failed to get clusters from ZStone 5.2.x / 5.3.x +failed\ to\ get\ clusters\ from\ ZStone\ (version\ >\=\ 5.4.x) = failed to get clusters from ZStone (version >= 5.4.x) +failed\ to\ get\ clusters\ from\ ZStone = failed to get clusters from ZStone +failed\ to\ get\ host\ info\ from\ ZStone = failed to get host info from ZStone +failed\ to\ get\ pool\ info\ from\ ZStone = failed to get pool info from ZStone +failed\ to\ get\ session\ info\ from\ ZStone = failed to get session info from ZStone +failed\ to\ update\ cluster = failed to update cluster +failed\ to\ add\ host = failed to add host +failed\ to\ add\ ZStone\ hosts = failed to add ZStone hosts +invalid\ session\ for\ ZStone\ server = invalid session for ZStone server +ZStone\ session\ expired = ZStone session expired +failed\ to\ get\ api\ response\ for\ path[%s] = failed to get api response for path[{0}] +ZStone\ API\ failed = ZStone API failed +Failed\ to\ find\ ZStone\ with\ uuid\ [%s] = Failed to find ZStone with uuid [{0}] # In Module: zwatch unknown\ parameter[%s]\ in\ zwatch\ return\ with\ clause,\ %s = unknown parameter[{0}] in zwatch return with clause, {1} @@ -3967,6 +4812,7 @@ invalid\ argument[limit\:%s],\ it\ can't\ be\ a\ negative\ number = invalid argu invalid\ argument[start\:%s],\ it\ can't\ be\ a\ negative\ number = invalid argument[start:{0}], it can''t be a negative number value[%s]\ is\ not\ a\ Integer\ number = value[{0}] is not a Integer number unknown\ argument[%s] = unknown argument[{0}] +there\ are\ 
multiple\ EventFamily\ with\ the\ name[%s],\ you\ must\ specify\ the\ label[%s] = there are multiple EventFamily with the name[{0}], you must specify the label[{1}] invalid\ query\ label[%s].\ Allowed\ label\ names\ are\ %s = invalid query label[{0}]. Allowed label names are {1} cannot\ find\ EventFamily[name\:%s,\ namespace\:%s] = cannot find EventFamily[name:{0}, namespace:{1}] cannot\ find\ EventFamily[name\:%s] = cannot find EventFamily[name:{0}] diff --git a/conf/i18n/messages_zh_CN.properties b/conf/i18n/messages_zh_CN.properties index 2620368db78..c022aed6460 100755 --- a/conf/i18n/messages_zh_CN.properties +++ b/conf/i18n/messages_zh_CN.properties @@ -6,6 +6,16 @@ If\ a\ specified\ Accesskey\ is\ expected,\ the\ AccesskeyId\ and\ the\ Accesske Access\ key\ ID\ and\ secret\ cannot\ contain\ '\:' = The\ number\ of\ access\ keys\ for\ account[uuid\=%s]\ has\ exceeded\ the\ maximum\ limit = +# In Module: account-import +unable\ to\ support\ third\ party\ account\ source\ of\ type[%s] = +third\ party\ user[credentials\=%s]\ has\ already\ binding\ to\ other\ account = +invalid\ account\ spec\:\ accountUuid\ is\ null = +invalid\ account\ spec\:\ failed\ to\ find\ account[uuid\=%s] = +account[uuid\=%s]\ has\ already\ binding\ other\ third\ party\ source = +failed\ to\ import\ account\ from\ source[uuid\=%s,\ type\=%s] = 从 {1} 类型的源 {0} 导入账户失败 +failed\ to\ unbinding\ accounts\ from\ source[uuid\=%s,\ type\=%s] = {1} 类型的源 {0} 解绑账户失败 +failed\ to\ delete\ source[uuid\=%s,\ type\=%s] = 删除 {1} 类型的账户源 {0} 失败 + # In Module: acl not\ support\ the\ ip\ version\ %d = 不支持的IP版本{0} %s\ duplicate/overlap\ ip\ entry\ with\ access-control-list\ group\:%s = {0}中有和访问控制组{1}里的ip重复/重叠 @@ -21,12 +31,97 @@ domain\ and\ url\ can\ not\ both\ empty = 域和URL不能同时为空 domain[%s]\ is\ not\ validate\ domain = 域[{0}]不是验证域 url[%s]\ is\ not\ validate\ url = URL[{0}]不是验证URL +# In Module: aliyun-storage +accessKey\ and\ keySecret\ must\ be\ set = 必须设置 accessKey 和 keySecret +ocean\ api\ endpoint\ must\ 
not\ be\ null = ocean API 的 endpoint 不能为空 +accessKey\ and\ keySecret\ must\ be\ set! = 必须设置 accessKey 和 keySecret +regionId\ must\ be\ set! = 必须设置 regionId +no\ current\ used\ key/secret\ for\ %s! = 没有当前使用的 {0} 密钥/密钥对 +Not\ a\ valid\ message! = 不是有效的消息 +%s\ failed,\ ErrorCode\:\ %s,\ ErrorMessage\:\ %s = {0} 失败,错误代码:{1},错误消息:{2} +Device\ Not\ Ready\ in\ %d\ milli\ seconds = 设备在 {0} 毫秒内未就绪 +snapshot\ task\ cannot\ finished\ in\ %d\ milliseconds,\ now\ progress\ is\ %d,\ status\ is\ %s = 快照任务无法在 {0} 毫秒内完成,当前进度为 {1},状态为 {2} +snapshot\ task\ status\ is\ finished\ %s = 快照任务状态已完成 {0} +not\ supported\ HybridClient = 不支持的 HybridClient +arg\ 'endpoint'\ must\ be\ set\ in\ %s\ type = 参数 ''endpoint'' 必须在 {0} 类型中设置 +not\ supported\ datacenter\ [%s]\ type\ here! = 此处不支持数据中心 [{0}] 类型 +must\ indicate\ zoneId\ in\ private\ aliyun. = 必须在私有阿里云中指定 zoneId +make\ ocean\ api\ signature\ string\ failed\:\ %s = 生成 ocean API 签名字符串失败:{0} +url(ocean\ endpoint)\ must\ be\ set\ for\ aliyun\ ebs\ backupstorage = 必须为阿里云 EBS 备份存储设置 URL(ocean 端点) +couldn't\ find\ domain\ on\ such\ oss\:\ [%s] = +aliyun\ ebs\ backup\ storage\ do\ not\ support\ to\ cancel\ download\ image = +no\ such\ object\ %s\ found\ in\ bucket\ %s = +couldn't\ find\ such\ oss\ bucket\:\ [%s] = +aliyun\ ebs\ backup\ storage\ do\ not\ support\ calculate\ image\ hash = +cannot\ delete\ oss\ bucket\ [%s],\ Aliyun\ Ebs\ BackupStorage\ [%s]\ still\ existed,\ please\ delete\ it\ first. = +cannot\ find\ device\ path\ from\ volume\:\ %s = +aliyun\ ebs\ not\ support\ resize\ on\ running\ vm\ now. = +iso\ [%s]\ has\ been\ attached,\ we\ can\ not\ attach\ it\ until\ detach\ it = +url(ocean\ endpoint)\ must\ be\ set\ for\ aliyun\ ebs\ primarystorage = +url\ must\ starts\ with\ http\://\ or\ https\://,\ but\ got\ %s = +panguPartitionUuid\ or\ identityZoneUuid\ must\ be\ set. 
= +panguPartitionUuid\ [%s]\ not\ be\ matched\ with\ identityZoneUuid\ [%s] = +the\ aliyun\ ebs\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = 阿里云 EBS 主存储[uuid:{0}, name:{1}] 在附加的集群中找不到可用于实例化卷的可用主机 +cannot\ find\ snapshot\ from\ image\:\ %s,\ maybe\ the\ image\ has\ been\ deleted = +ebs\ primarystorage\ cannot\ support\ decrease\ size\ now = +create\ snapshot\ timeout,\ progress\ is\ %d = +couldn’t\ find\ any\ BackupStorage\ that\ is\ connected\ and\ enabled\ for\ commiting\ volume\ [uuid\:%s] = +aliyun\ ebs\ primarystorage\ only\ support\ aliyun\ ebs\ bs,\ actually\ get\ type\:\ %s = +cannot\ delete\ identity\ zone\ [%s],\ Aliyun\ Ebs\ PrimaryStorage\ [%s]\ still\ existed,\ please\ delete\ it\ first. = +append\ volumeId\:\ %s,\ but\ another\ volumeId\ existed\ in\ url\:\ %s = +hostUuid\ [%s]\ already\ existed\ in\ url\:\ %s = +invalid\ install\ url\:\ %s = +cannot\ find\ devicePath\ on\ host\:\ %s = +invalid\ install\ path\:\ %s = +invalid\ snapshot\ install\ path\:\ %s = +PrimaryStorage\ [%s]\ still\ running,\ can\ not\ delete\ access\ group = +access\ group\ rule\ [%s]\ already\ existed\ in\ access\ group\ [%s] = +access\ group\ [%s]\ already\ existed\ in\ datacenter\ [%s] = +no\ filesystem\ [%s]\ found\ in\ region\:\ %s = +nas\ filesystem\ existed\ in\ datacenter\:\ %s = +some\ primary\ storage\ [%s]\ used\ this\ nas,\ can\ not\ delete\ it\ until\ delete\ the\ primary\ storage. = +mount\ domain\ not\ valid\ after\ %d\ milliseconds,\ delete\ it... = +no\ such\ mount\ target\ [%s]\ in\ nas\:\ %s = +there\ are\ no\ nas\ access\ group\ existed,\ please\ create\ at\ least\ one = +nas\ mount\ target\ [%s]\ existed\ in\ filesystem\:\ %s = +the\ access\ group\ attached\ is\ already\:\ %s = +no\ connected\ host\ found\ in\ the\ cluster[uuid\:%s] = +AliyunNasAccessGroupVO[%s]\ is\ not\ existed,\ may\ be\ it\ has\ been\ deleted! 
= +EcsVSwitchVO[%s]\ is\ not\ existed,\ may\ be\ it\ has\ been\ deleted! = +cannot\ find\ an\ available\ host\ to\ operation\ in\ primary\ storage\:\ %s = +failed\ to\ ping\ aliyun\ nas\ primary\ storage[uuid\:%s]\ from\ host[uuid\:%s],because\ %s.\ disconnect\ this\ host-ps\ connection = 从主机[uuid:{1}] ping 阿里云 NAS 主存储[uuid:{0}] 失败,因为 {2}。断开此主机-存储连接 +operation\ error,\ because\:%s = +nas\ primary\ storage\ not\ mounted,\ please\ init\ it\ first! = +cannot\ find\ any\ BackupStorageKvmFactory\ for\ the\ type[%s] = +cannot\ find\ host\ to\ operate\ volume\:\ [%s] = +cannot\ find\ and\ host\ to\ sync\ volume\ size\ in\ primary\:\ %s = +image\ [%s]\ has\ been\ deleted,\ cannot\ reinit\ root\ volume\ from\ it = +no\ available\ host\ could\ check\ mountPath! = +unable\ to\ allocate\ backup\ storage\ specified\ by\ uuids\:\ %s,\ becasue\:\ %s = +No\ backup\ storage\ to\ commit\ volume\ [uuid\:\ %s] = +aliyun\ nas\ primarystorage\ only\ support\ imagestore\ bs,\ actually\ get\ type\:\ %s = +unable\ to\ commit\ backup\ storage\ specified\ by\ uuids\:\ %s,\ becasue\:\ %s = +image\ [uuid\:%s]\ has\ been\ deleted = +the\ volume[uuid;%s]\ is\ attached\ to\ a\ VM[uuid\:%s]\ which\ is\ in\ state\ of\ %s,\ cannot\ do\ the\ snapshot\ merge = +unable\ to\ attach\ a\ primary\ storage\ to\ cluster.\ Kvm\ host[uuid\:%s,\ name\:%s]\ in\ cluster\ has\ qemu-img\ with\ version[%s];\ but\ the\ primary\ storage\ has\ attached\ to\ a\ cluster\ that\ has\ kvm\ host[uuid\:%s],\ which\ has\ qemu-img\ with\ version[%s].\ qemu-img\ version\ greater\ than\ %s\ is\ incompatible\ with\ versions\ less\ than\ %s,\ this\ will\ causes\ volume\ snapshot\ operation\ to\ fail.\ Please\ avoid\ attaching\ a\ primary\ storage\ to\ clusters\ that\ have\ different\ Linux\ distributions,\ in\ order\ to\ prevent\ qemu-img\ version\ mismatch = +no\ available\ host\ could\ download\ imagecache! 
= +the\ aliyun\ nas\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = +the\ aliyun\ nas\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ delete\ bits\ on\ primarystorage = +not\ support = +the\ AliyunNAS\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = +the\ Aliyun\ Nas\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = +failed\ to\ check\ mount\ path\ on\ host\:\ %s = +cannot\ find\ a\ host\ to\ cleanup\ image\ cache. = +resource[uuid\:\ %s]\ cannot\ found = +cannot\ find\ available\ host\ for\ operation\ on\ primary\ storage[uuid\:%s]. = +host\ where\ vm[uuid\:%s]\ locate\ is\ not\ Connected. = +appName\:\ %s,\ partitionName\:\ %s\ is\ existed\ in\ identityZone\:\ %s = + # In Module: aliyunproxy # In Module: applianceVm there\ is\ no\ available\ nicType\ on\ L3\ network\ [%s] = appliance\ vm[uuid\:%s]\ is\ in\ status\ of\ %s\ that\ cannot\ make\ http\ call\ to\ %s = 系统虚拟机[uuid:{0}]处于{1}状态,无法对[{2}]执行HTTP RPC调用 -operation\ error,\ because\:%s = 操作错误,因为{0} appliance\ vm\ %s\ stopped = 应用装置VM{0}已停止 appliance\ vm\ %s\ reboot = 应用装置虚拟机{0}重新启动 appliance\ vm\ %s\ reboot\ failed = 应用装置虚拟机{0}重新启动失败 @@ -78,6 +173,7 @@ AutoScalingRuleVO[uuid\:%s]\ is\ %s,\ state\ change\ is\ not\ allowed = AutoScal # In Module: baremetal IPMI\ Address\ %s\ is\ not\ valid = IPMI地址{0}是无效的 +Failed\ to\ reach\ the\ bare-metal\ chassis,\ please\ make\ sure\:\ 1.\ the\ IPMI\ connection\ is\ active;\ 2.\ the\ IPMI\ Address,\ Port,\ Username\ and\ Password\ are\ correct;\ 3.\ IPMI\ Over\ LAN\ is\ enabled\ in\ BIOS. = Baremetal\ Chassis\ of\ IPMI\ address\ %s\ and\ IPMI\ port\ %d\ has\ already\ been\ created. = IPMI地址为{0},端口为{1}的裸金属设备已经被创建 Cluster[uuid\:%s]\ does\ not\ exists. 
= 集群[uuid:{0}]不存在 Cluster[uuid\:%s]\ is\ not\ a\ baremetal\ cluster. = 集群[uuid:{0}]不是一个裸金属集群 @@ -86,6 +182,8 @@ IPMI\ Address\ and\ Port\ %s\:%d\ already\ exists. = IPMI地址为{0},端口 no\ usable\ baremetal\ pxeserver\ attached\ to\ cluster[uuid\:%s] = 裸金属集群[uuid:{0}]中没有可用的部署服务器 baremetal\ chassis[uuid\:%s]\ is\ supposed\ to\ using\ pxeserver[uuid\:%s],\ but\ it\ was\ pxeserver[uuid\:%s]\ that\ actually\ handled\ the\ DHCP\ request = 裸金属设备[uuid:{0}]应当由部署服务器[uuid:{1}]提供DHCP服务,但实际情况是部署服务器[uuid:{2}]提供的DHCP服务 License\ not\ found,\ please\ apply\ addon\ license\ for\ product\ baremetal. = +Hijacked\ detected.\ Your\ license[%s]\ permits\ %s\ baremetal\ chassis,\ but\ we\ detect\ there\ are\ %s\ in\ the\ database.\ You\ can\ either\ delete\ additional\ chassis\ or\ apply\ a\ new\ license. = +Insufficient\ baremetal\ chassis\ number\ licensed.\ You\ can\ either\ delete\ additional\ chassis\ or\ apply\ a\ new\ license. = failed\ to\ delete\ baremetal\ chassis\ %s = 无法删除裸机机箱{0} Failed\ to\ remotely\ power\ on\ baremetal\ chassis[uuid\:%s] = 无法远程启动裸金属设备[uuid:{0}] Failed\ to\ remotely\ power\ reset\ baremetal\ chassis[uuid\:%s] = 无法远程重启裸金属设备[uuid:{0}] @@ -135,6 +233,7 @@ PXE\ Server\ DHCP\ Interface\ %s\ does\ not\ exists,\ or\ it\ does\ not\ have\ a cluster[uuid\:%s]\ and\ pxeserver[uuid\:%s]\ don't\ belong\ to\ one\ zone = 裸金属集群[uuid:{0}]和部署服务器[uuid:{1}]不属于同一个数据中心 cluster[uuid\:%s]\ is\ not\ baremetal\ cluster = 集群[uuid:{0}]不是一个裸金属集群 baremetal\ pxeserver[uuid\:%s]\ already\ attached\ to\ cluster[uuid\:%s] = 部署服务器[uuid:{0}]已经挂载到裸金属集群[uuid:{1}],无需再次挂载 +baremetal\ pxeserver[uuid\:%s]\ is\ not\ compatible\ with\ baremetal\ instances\ in\ cluster[uuid\:%s],\ existing\ nic\ ip\ %s\ is\ out\ of\ pxeserver\ dhcp\ range\ %s\ ~\ %s. 
= baremetal\ pxeserver[uuid\:\ %s]\ not\ attached\ to\ cluster[uuid\:\ %s] = 部署服务器[uuid:{0}]没有挂载到裸金属集群[uuid:{1}] failed\ to\ init\ configs\ on\ baremetal\ pxeserver[uuid\:%s] = 部署服务器[uuid:{0}]初始化配置失败 failed\ to\ create\ bm\ instance\ configs\ on\ baremetal\ pxeserver[uuid\:%s] = 在部署服务器[uuid:{0}]上创建裸金属主机相关配置失败 @@ -147,6 +246,7 @@ failed\ to\ start\ baremetal\ pxeserver[uuid\:%s] = 启动部署服务器[uuid:{ failed\ to\ stop\ baremetal\ pxeserver[uuid\:%s] = 停止部署服务器[uuid:{0}]失败 failed\ to\ create\ dhcp\ config\ of\ chassis[uuid\:%s]\ on\ pxeserver[uuid\:%s] = 无法在Pxeserver[uuid:{1}]上创建机箱[uuid:{0}]的DHCP配置 failed\ to\ delete\ dhcp\ config\ of\ chassis[uuid\:%s]\ on\ pxeserver[uuid\:%s] = 无法删除机箱[uuid:{0}](在Pxeserver[uuid:{1}]上)的DHCP配置 +the\ uuid\ of\ baremtal\ pxeserver\ agent\ changed[expected\:%s,\ actual\:%s],\ it's\ most\ likely\ the\ agent\ was\ manually\ restarted.\ Issue\ a\ reconnect\ to\ sync\ the\ status = unable\ to\ connect\ to\ baremetal\ pxeserver[url\:%s],\ because\ %s = 连接部署服务器[uuid:{0}]失败,因为: {1} failed\ to\ mount\ baremetal\ cache\ of\ image[uuid\:%s] = 挂载裸金属镜像缓存[uuid:{0}]失败 no\ enough\ space\ left\ in\ baremetal\ image\ cache\ for\ image[uuid\:%s] = 部署服务器存储路径剩余空间不足 @@ -156,9 +256,11 @@ unsupported\ backup\ storage\ type\ for\ baremetal = 裸金属管理所不支持 # In Module: baremetal2 bond\ name\ %s\ has\ been\ existed = 债券名称{0}已存在 nic\ with\ mac\:%s\ has\ been\ bonded = 已绑定具有MAC:{0}的NIC +cannot\ find\ the\ cluster\ of\ baremetal2\ chassis[uuid\:%s],\ maybe\ it\ doesn't\ exist = there\ is\ no\ baremetal2\ gateway\ found\ in\ cluster[uuid\:%s] = 在集群[uuid:{0}]中找不到BareMetal2网关 there\ is\ no\ usable\ baremetal2\ gateway\ found\ in\ cluster[uuid\:%s] = 在集群[uuid:{0}]中找不到可用的Baremetal2网关 there\ is\ no\ baremetal2\ provision\ network\ found\ in\ cluster[uuid\:%s] = 在集群[uuid:{0}]中找不到BareMetal2配置网络 +baremetal2\ provision\ network[uuid\:%s]\ is\ not\ usable,\ make\ sure\ it's\ Enabled = wrong\ baremetal2\ chassis\ hardware\ info\ format\:\ %s = 错误的Baremetal2机箱硬件信息格式:{0} the\ cpu\ 
architecture\ of\ the\ chassis[arch\:%s]\ and\ the\ cluster[arch\:%s]\ don't\ match = 机箱[arch:{0}]和集群[arch:{1}]的CPU体系结构不匹配 only\ baremetal2\ chassis\ with\ boot\ mode\ %s\ is\ supported = 仅支持引导模式为{0}的BareMetal2机箱 @@ -168,10 +270,14 @@ wrong\ baremetal2\ chassis\ disk\ hardware\ info\ format\:\ %s = 错误的Bareme other\ chassis\ has\ nics\ with\ the\ same\ mac\ address,\ which\ is\ impossible = 其他机箱具有相同MAC地址的NIC,这是不可能的 BareMetal2\ Chassis[uuid\:%s]\ doesn't\ exist\ or\ is\ disabled = Baremetal2机箱[uuid:{0}]不存在或已禁用 no\ available\ baremetal2\ chassis\ found = 找不到可用的Baremetal2机箱 +no\ available\ baremetal2\ chassis\ found\ in\ baremetal2\ clusters[uuids\:%s] = Cannot\ find\ BareMetal2\ Chassis[uuid\:%s],\ it\ may\ have\ been\ deleted = License\ not\ found,\ please\ apply\ addon\ license\ for\ product\ elastic-baremetal. = +Hijacked\ detected.\ Your\ license[%s]\ permits\ %s\ elastic-baremetal\ chassis,\ but\ we\ detect\ there\ are\ %s\ in\ the\ database.\ You\ can\ either\ delete\ additional\ chassis\ or\ apply\ a\ new\ license. = +Insufficient\ elastic-baremetal\ chassis\ number\ licensed.\ You\ can\ either\ delete\ additional\ chassis\ or\ apply\ a\ new\ license. = not\ supported = 不支持 Bare\ Metal\ IPMI\ 2\ Chassis\ %s\:%d\ already\ exists = 裸机IPMI 2机箱{0}:{1}已存在 +Failed\ to\ reach\ the\ baremetal2\ chassis,\ please\ make\ sure\:\ 1.\ the\ IPMI\ connection\ is\ active;\ 2.\ the\ IPMI\ Address,\ Port,\ Username\ and\ Password\ are\ correct;\ 3.\ IPMI\ Over\ LAN\ is\ enabled\ in\ BIOS. = BareMetal2\ Chassis\ of\ IPMI\ address\ %s\ and\ IPMI\ port\ %d\ has\ already\ been\ created. = 已创建IPMI地址为{0}、IPMI端口为{1}的BareMetal2机箱。 Cluster[uuid\:%s]\ is\ not\ a\ BareMetal2\ Cluster. 
= 集群[uuid:{0}]不是BareMetal2集群。 no\ usable\ baremetal2\ gateway\ in\ cluster[uuid\:%s] = 集群[uuid:{0}]中没有可用的Baremetal2网关 @@ -185,25 +291,43 @@ received\ hardware\ info\ for\ unknown\ baremetal2\ chassis[ipmi_addr\:%s,\ ipmi cluster\ type\ and\ hypervisor\ type\ should\ all\ be\ baremetal2\ or\ all\ not = 集群类型和虚拟机管理程序类型应全部为BareMetal2或全部为非BareMetal2 the\ architecture\ must\ be\ set\ when\ create\ new\ baremetal2\ clusters = 创建新的Baremetal2集群时必须设置体系结构 do\ not\ add\ host\ into\ baremetal2\ cluster = 不要将主机添加到BareMetal2集群中 +l2\ network\ should\ not\ have\ the\ same\ interface\ name\ with\ provision\ network\ that's\ already\ attached\ to\ the\ cluster = Can\ not\ attach\ third-party\ ceph\ with\ token\ into\ aarch64\ cluster. = 无法使用令牌将第三方 分布式存储 附加到Aarch64集群。 Can\ not\ attach\ local\ storage\ into\ baremetal2\ cluster. = 无法将本地存储连接到BareMetal2集群。 +failed\ to\ delete\ convert\ volume\ to\ chassis\ local\ disk\ configurations\ in\ gateway[uuid\:%s]\ for\ baremetal2\ instance[uuid\:%s] = baremetal2\ instance[uuid\:%s]\ is\ not\ connected,\ cannot\ detach\ provision\ nic\ from\ bonding = +failed\ to\ detach\ provision\ nic\ to\ bonding\ on\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = baremetal2\ instance[uuid\:%s]\ not\ connected,\ cannot\ attach\ provision\ nic\ to\ bond = +failed\ to\ attach\ provision\ nic\ to\ bonding\ on\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = chassis\:%s\ disk\ does\ not\ have\ wwn\ info,\ please\ inspect\ chassis\ and\ try\ again = 机箱:{0}磁盘没有WWN信息,请检查机箱并重试 convert\ image\ data\ to\ local\ disk\ failed = 将图像数据转换到本地磁盘失败 baremetal2\ instance[uuid\:%s]\ convert\ volume\ failed\ on\ baremetal2\ chassis[uuid\:%s]\ ,\ timeout\ after\ %s\ minutes\ = Baremetal2实例[uuid:{0}]转换卷在Baremetal2机箱[uuid:{1}]上失败,{2}分钟后超时 failed\ to\ prepare\ provision\ network\ in\ gateway[uuid\:%s],\ because\ %s = 无法在网关[uuid:{0}]中准备设置网络,因为{1} failed\ to\ destroy\ provision\ network\ in\ gateway[uuid\:%s],\ because\ %s = 
无法销毁网关[uuid:{0}]中的设置网络,因为{1} no\ provision\ nic\ found\ for\ baremetal2\ instance[uuid\:%s] = 未找到BareMetal2实例[uuid:{0}]的配置NIC +failed\ to\ create\ provision\ configurations\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = +failed\ to\ delete\ provision\ configurations\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = +failed\ to\ create\ console\ proxy\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = +failed\ to\ change\ default\ network\ from\ l3[uuid\:%s]\ to\ l3[uuid\:%s]\ for\ baremetal2\ instance[uuid\:%s],\ because\ %s = +failed\ to\ ping\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = +failed\ to\ change\ the\ password\ of\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = failed\ to\ power\ on\ baremetal2\ chassis[uuid\:%s]\ using\ ipmitool = 无法使用ipmitool打开Baremetal2机箱[uuid:{0}]的电源 failed\ to\ power\ off\ baremetal2\ chassis[uuid\:%s]\ using\ ipmitool = 无法使用ipmitool关闭Baremetal2机箱[uuid:{0}]的电源 +failed\ to\ power\ off\ baremetal2\ instance[uuid\:%s]\ by\ bm\ agent,\ because\ %s = baremetal2\ chassis[uuid\:%s]\ is\ still\ not\ POWER_OFF\ %d\ seconds\ later = Baremetal2机箱[uuid:{0}]在{1}秒后仍未关闭电源_。 vmInstanceUuids\ is\ empty = VMInstanceUuids为空 the\ baremetal2\ gateway[uuid\:%s,\ status\:%s]\ is\ not\ Connected = Baremetal2网关[uuid:{0},状态:{1}]未连接 baremetal2\ instance[uuid\:%s]\ not\ connected,\ cannot\ attach\ nic\ to\ it = BareMetal2实例[uuid:{0}]未连接,无法将NIC连接到该实例 +failed\ to\ attach\ nic[uuid\:%s]\ to\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = baremetal2\ instance[uuid\:%s]\ is\ not\ connected,\ cannot\ detach\ nic\ from\ it = BareMetal2实例[uuid:{0}]未连接,无法将NIC与其分离 +failed\ to\ detach\ nic[uuid\:%s]\ from\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = +failed\ to\ prepare\ volume[uuid\:%s]\ for\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = 
+failed\ to\ attach\ volume[uuid\:%s]\ to\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = baremetal2\ instance[uuid\:%s]\ is\ not\ connected,\ cannot\ attach\ volume\ to\ it = BareMetal2实例[uuid:{0}]未连接,无法将卷附加到该实例 +failed\ to\ get\ volume[uuid\:%s]\ lunid\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = failed\ to\ get\ gateway\ ips\ of\ the\ access\ path[iscsiPath\:\ %s]\ for\ block\ volume\ %s,\ because\ %s = +failed\ to\ detach\ volume[uuid\:%s]\ from\ baremetal2\ instance[uuid\:%s]\ through\ gateway[uuid\:%s],\ because\ %s = +failed\ to\ destroy\ volume[uuid\:%s]\ for\ baremetal2\ instance[uuid\:%s]\ in\ gateway[uuid\:%s],\ because\ %s = all\ ceph\ mons\ of\ primary\ storage[uuid\:%s]\ are\ not\ in\ Connected\ state = 分布式存储[uuid:{0}]所有的监控节点都不是已连接状态 there\ is\ already\ a\ baremetal\ pxe\ server\ with\ management\ ip\ %s,\ do\ not\ use\ it\ to\ create\ baremetal2\ gateway = 已存在管理IP为{0}的Baremetal PXE服务器,请不要使用它来创建Baremetal2网关 management\ ip[%s]\ is\ neither\ an\ IPv4\ address\ nor\ a\ valid\ hostname = 管理IP[{0}]既不是IPv4地址也不是有效的主机名 @@ -220,7 +344,9 @@ cluster[uuid\:%s]\ is\ not\ a\ baremetal2\ cluster = 集群[uuid:{0}]不是Bar gateway[uuid\:%s]\ does\ not\ exist = 网关[uuid:{0}]不存在 baremetal2\ gateway[uuid\:%s]\ is\ already\ in\ cluster[uuid\:%s] = Baremetal2网关[uuid:{0}]已在集群[uuid:{1}]中 baremetal2\ gateway[uuid\:%s]\ is\ not\ in\ the\ same\ zone\ as\ cluster[uuid\:%s] = Baremetal2网关[uuid:{0}]与集群[uuid:{1}]不在同一个数据中心中 +cannot\ change\ the\ cluster\ of\ baremetal2\ gateway[uuid\:%s]\ when\ there\ are\ running\ instances\ depending\ on\ it = baremetal2\ instance[uuid\:%s]\ doesn't\ exist,\ cannot\ generate\ its\ console\ url = Baremetal2实例[uuid:{0}]不存在,无法生成其控制台URL +baremetal2\ gateway[uuid\:%s]\ is\ not\ Connected,\ cannot\ generate\ console\ url\ for\ instance[uuid\:%s] = cluster[uuid\:%s]\ hypervisorType\ is\ not\ %s = 集群[uuid:{0}]管理程序类型不是{1} cluster[%s]\ is\ not\ baremetal2\ type = 集群[{0}]不是BareMetal2类型 baremetal2\ instance\ 
required = @@ -238,49 +364,84 @@ failed\ to\ allocate\ primary\ storage\ in\ clusters[uuids\:%s]\ for\ baremetal2 failed\ to\ allocate\ gateway\ in\ clusters[uuids\:%s]\ for\ baremetal2\ instance[uuid\:%s] = 无法在集群[uuid:{0}]中为BareMetal2实例[uuid:{1}]分配网关 failed\ to\ allocate\ chassis\ in\ clusters[uuids\:%s]\ for\ baremetal2\ instance[uuid\:%s] = 无法在集群[uuid:{0}]中为Baremetal2实例[uuid:{1}]分配机箱 no\ baremetal2\ cluster\ found\ in\ clusters[uuid\:%s] = 在集群[uuid:{0}]中找不到BareMetal2集群 +only\ baremetal2\ clusters[uuid\:%s]\ meet\ the\ needs\ for\ chassis\ and\ gateway,\ but\ they\ have\ no\ provision\ network\ attached = +name[%s]\ is\ invalid,\ the\ name\ requirement\:\ 1~128\ characters,\ support\ uppercase\ and\ lowercase\ letters,\ numbers,\ underscores,\ and\ hyphens;\ It\ can\ only\ start\ with\ uppercase\ and\ lowercase\ letters;\ It\ does\ not\ start\ or\ end\ with\ a\ space\ = only\ support\ vpc\ network\ support\ attach\ eip\ on\ baremetal2\ instance = 仅支持VPC网络支持在Baremetal2实例上附加EIP bare\ metal\ instance\ not\ allowed\ to\ change\ vm\ nic\ network = 不允许裸机实例更改VM NIC网络 current\ operation\ is\ not\ supported\ on\ local\ baremetal\ instance = 本地裸机实例不支持当前操作 not\ supported\ by\ baremetal2\ instance = BareMetal2实例不支持 baremetal2\ instance[uuid\:%s]\ is\ not\ Connected = BareMetal2实例[uuid:{0}]未连接 baremetal2\ instance[uuid\:%s]\ is\ not\ stopped = Baremetal2实例[uuid:{0}]未停止 +baremetal2\ instance[uuid\:%s]\ is\ running\ but\ its\ agent\ is\ not\ Connected = make\ sure\ all\ baremetal2\ gateways\ on\ provision\ network[uuid\:%s]\ are\ Connected = 确保配置网络[uuid:{0}]上的所有Baremetal2网关均已连接 +baremetal2\ instance[uuid\:%s]\ is\ not\ stopped\ can\ not\ change\ its\ chassis\ offering = +baremetal2\ instance[uuid\:%s]\ has\ not\ been\ allocated\ a\ chassis,\ start\ the\ instance\ and\ try\ again = only\ l3\ network\ with\ ip\ version\ %d\ is\ supported\ by\ baremetal2\ instance = Baremetal2实例仅支持IP版本为{0}的三层网络 l2\ network\ type\ %s\ not\ supported\ by\ baremetal2\ instance = 
二层网络类型{0}不受Baremetal2实例支持 customMac\ is\ mandatory\ when\ attaching\ l3\ network\ to\ baremetal2\ instance = 将三层网络连接到Baremetal2实例时,CustomMAC是必需的 %s\ is\ not\ valid\ mac\ address = {0}不是有效的MAC地址 duplicated\ mac\ address\ %s = 重复的MAC地址{0} +baremetal2\ instance[uuid\:%s]\ running\ on\ chassis[uuid\:%s],\ which\ doesn't\ have\ non-provisioning\ nic\ with\ mac\ address\ %s = mac\ address\ %s\ has\ already\ been\ used,\ try\ another\ one = MAC地址{0}已被使用,请尝试其他地址 nic\ with\ mac\:%s\ cannot\ be\ attached\ l3Network,\ because\ it\ has\ been\ bonded = MAC为{0}的NIC无法连接到L3Network,因为它已绑定 third\ party\ ceph\ cannot\ mixed\ with\ other\ primary\ storage = 第三方 分布式存储 不能与其他主存储混合 remote\ provision\ instance\ not\ support\ attach\ provision\ nic\ to\ bond = +cluster[uuid\:%s]\ is\ not\ an\ Enabled\ baremetal2\ cluster,\ cannot\ start\ instance[uuid\:%s]\ in\ it = +baremetal2\ gateway[uuid\:%s]\ does\ not\ exist\ or\ is\ not\ Enabled\ or\ Connected = +baremetal2\ gateway[uuid\:%s]\ is\ not\ in\ cluster\ [uuid\:%s] = please\ specify\ chassis\ uuid\ or\ chassis\ offering\ uuid\ to\ start\ baremetal2\ instance[uuid\:%s] = 请指定机箱uuid或机箱提供uuid以启动BareMetal2实例[uuid:{0}] baremetal2\ chassis\ offering[uuid\:%s]\ does\ not\ exist = Baremetal2机箱产品[uuid:{0}]不存在 baremetal2\ chassis\ offering[uuid\:%s]\ is\ not\ Enabled = 未启用Baremetal2机箱产品[uuid:{0}] no\ need\ to\ set\ chassisOfferingUuid\ because\ the\ instance\ has\ been\ assigned\ an\ chassis\ already = 无需设置Chassisofferinguuid,因为实例已分配机箱 no\ need\ to\ set\ chassisOfferingUuid\ because\ the\ instance\ has\ been\ assigned\ an\ chassis\ offering\ already = 无需设置ChassisOfferinguuid,因为已为实例分配了机箱产品 baremetal2\ chassis[uuid\:%s]\ does\ not\ exist = Baremetal2机箱[uuid:{0}]不存在 +baremetal2\ chassis[uuid\:%s]\ is\ not\ belonging\ to\ chassis\ offering[uuid\:%s] = baremetal2\ chassis[uuid\:%s]\ is\ not\ Enabled = 未启用Baremetal2机箱[uuid:{0}] baremetal2\ chassis[uuid\:%s]\ has\ already\ been\ allocated = 已分配Baremetal2机箱[uuid:{0}] +zone[uuid\:%s]\ is\ specified\ but\ 
it's\ not\ Enabled,\ can\ not\ create\ baremetal2\ instance\ from\ it = +cluster[uuid\:%s]\ is\ specified\ but\ it's\ not\ an\ Enabled\ baremetal2\ cluster,\ can\ not\ create\ baremetal2\ instance\ from\ it = neither\ chassisUuid\ nor\ chassisOfferingUuid\ is\ set\ when\ create\ baremetal2\ instance = 创建BareMetal2实例时,Chassisuuid和ChassisOfferuuid均未设置 do\ not\ set\ chassisUuid\ and\ chassisOfferingUuid\ at\ the\ same\ time = 不要同时设置Chassisuuid和ChassisOfferuuid +baremetal2\ chassis[uuid\:%s]\ is\ not\ Enabled,\ can't\ create\ baremetal2\ instance\ from\ it = +baremetal2\ chassis[uuid\:%s]\ is\ not\ Available,\ can't\ create\ baremetal2\ instance\ from\ it = +baremetal2\ chassis\ offering[uuid\:%s]\ is\ not\ Enabled,\ can't\ create\ baremetal2\ instance\ from\ it = +baremetal2\ gateway[uuid\:%s]\ is\ not\ Enabled,\ can't\ create\ baremetal2\ instance\ from\ it = +baremetal2\ gateway[uuid\:%s]\ is\ not\ Connected,\ can't\ create\ baremetal2\ instance\ from\ it = +baremetal2\ gateway[uuid\:%s]\ is\ not\ in\ the\ same\ cluster\ with\ chassis[uuid\:%s] = image\ cannot\ be\ empty\ unless\ chassis\ is\ in\ direct\ mode = 除非机箱处于直接模式,否则镜像不能为空 direct\ mode\ not\ support\ choose\ image = 直接模式不支持选择镜像 image[uuid\:%s]\ does\ not\ exist = 镜像[uuid:{0}]不存在 Chassis\ disk[%s]\ not\ have\ enough\ capacity\ for\ image[%s] = 机箱磁盘[{0}]没有足够的容量用于镜像[{1}] +image[uuid\:%s]\ is\ not\ Enabled,\ can't\ create\ baremetal2\ instance\ from\ it = +image[uuid\:%s]\ is\ not\ Ready,\ can't\ create\ baremetal2\ instance\ from\ it = +image[uuid\:%s]\ is\ of\ mediaType\:\ %s,\ only\ RootVolumeTemplate\ can\ be\ used\ to\ create\ baremetal2\ instance = +image[uuid\:%s]\ is\ of\ format\:\ %s,\ only\ %s\ can\ be\ used\ to\ create\ baremetal2\ instance = +image[uuid\:%s]\ is\ not\ baremetal2\ image,\ can't\ create\ baremetal2\ instance\ from\ it = +only\ image\ with\ boot\ mode\ %s\ is\ supported\ to\ create\ baremetal2\ instance = different\ boot\ mode\ between\ the\ image\ and\ chassis/offering = 
镜像和机箱/产品之间的引导模式不同 +the\ architecture\ of\ baremetal2\ cluster[arch\:%s]\ and\ image[arch\:%s]\ don't\ match = +not\ all\ disk\ offerings[uuids\:%s]\ are\ Enabled,\ can\ not\ create\ baremetal2\ instance\ from\ them = +the\ primary\ storage[%s]\ of\ the\ root\ volume\ and\ the\ primary\ storage[%s]\ of\ the\ data\ volume\ are\ not\ in\ the\ same\ cluster = cannot\ decide\ which\ zone\ the\ baremetal2\ instance\ should\ be\ created\ in = 无法确定应在哪个数据中心中创建BareMetal2实例 baremetal2\ instance[uuid\:%s]\ is\ either\ not\ exist\ or\ not\ Connected,\ cannot\ change\ its\ password = Baremetal2实例[uuid:{0}]不存在或未连接,无法更改其密码 +cannot\ find\ the\ image[uuid\:%s]\ in\ any\ connected\ backup\ storage\ attached\ to\ the\ zone[uuid\:%s].\ check\ below\:\\n1.\ if\ the\ backup\ storage\ is\ attached\ to\ the\ zone\ where\ the\ VM[name\:\ %s,\ uuid\:%s]\ is\ in\\n2.\ if\ the\ backup\ storage\ is\ in\ connected\ status,\ if\ not,\ try\ reconnecting\ it = +cannot\ find\ the\ image[uuid\:%s]\ in\ any\ connected\ backup\ storage.\ check\ below\:\\n1.\ if\ the\ backup\ storage\ is\ attached\ to\ the\ zone\ where\ the\ VM[name\:\ %s,\ uuid\:%s]\ is\ in\\n2.\ if\ the\ backup\ storage\ is\ in\ connected\ status,\ if\ not,\ try\ reconnecting\ it = no\ backup\ storage\ attached\ to\ the\ zone[uuid\:%s]\ contains\ the\ ISO[uuid\:%s] = 在数据中心 {0} 中找不到存放 ISO 镜像 {1} 的镜像存储 Cannot\ find\ BareMetal2\ Instance[uuid\:%s],\ it\ may\ have\ been\ deleted = %s\ can\ only\ be\ created\ or\ deleted = 只能创建或删除{0} %s\ can\ only\ be\ created\ or\ deleted\ when\ the\ baremetal2\ instance\ is\ Running = 只能在运行BareMetal2实例时创建或删除{0} +there\ already\ exists\ a\ baremetal2\ provision\ network\ with\ dhcpInterface\ \=\ %s,\ dhcpRangeStartIp\ \=\ %s,\ dhcpRangeEndIp\ \=\ %s,\ dhcpRangeNetmask\ \=\ %s,\ dhcpRangeGateway\ \=\ %s = +cannot\ update\ baremetal2\ provision\ network[uuid\:%s]\ dhcp\ configuration\ when\ there\ are\ instances\ depending\ on\ it = baremetal2\ provision\ network\ dhcp\ range\ netmask\ %s\ is\ invalid = 
Baremetal2设置网络DHCP范围网络掩码{0}无效 baremetal2\ provision\ network\ start\ ip\ %s\ and\ stop\ ip\ %s\ do\ not\ belong\ to\ the\ same\ subnet = Baremetal2配置网络启动IP{0}和停止IP{1}不属于同一子网 +cannot\ delete\ baremetal2\ provision\ network[uuid\:%s]\ when\ there\ are\ instances\ depending\ on\ it = cannot\ attach\ baremetal2\ provision\ network[uuid\:%s]\ to\ non-baremetal2\ cluster[uuid\:%s] = 无法将BareMetal2设置网络[uuid:{0}]连接到非BareMetal2集群[uuid:{1}] baremetal2\ provision\ network[uuid\:%s]\ is\ already\ attached\ to\ cluster[uuid\:%s] = Baremetal2配置网络[uuid:{0}]已连接到集群[uuid:{1}] cannot\ attach\ baremetal2\ provision\ network[uuid\:%s]\ to\ cluster[uuid\:%s]\ because\ the\ cluster\ already\ have\ one = 无法将BareMetal2设置网络[uuid:{0}]附加到集群[uuid:{1}],因为该集群已有一个网络 cannot\ attach\ baremetal2\ provision\ network[uuid\:%s]\ to\ cluster[uuid\:%s]\ because\ they\ are\ not\ in\ the\ same\ zone = 无法将BareMetal2配置网络[uuid:{0}]附加到集群[uuid:{1}],因为它们不在同一数据中心中 +cannot\ attach\ baremetal2\ provision\ network[uuid\:%s]\ to\ cluster[uuid\:%s],\ because\ we\ need\ to\ make\ sure\ that\ every\ gateway\ attached\ to\ the\ clusters\ that\ have\ the\ same\ provision\ network\ attached = +provision\ network\ should\ not\ have\ the\ same\ interface\ name\ with\ l2\ networks\ that\ are\ already\ attached\ to\ the\ cluster = +cannot\ detach\ baremetal2\ provision\ network[uuid\:%s]\ when\ there\ are\ running\ instances\ depending\ on\ it = networkUuids\ is\ empty = 网络uuid为空 not\ all\ baremetal2\ provision\ networks\ exist\ in\ %s = {0}中并不存在所有BareMetal2配置网络 failed\ to\ update\ provision\ network[uuid\:%s]\ in\ gateway[uuid\:%s]\:\ %s = 无法更新设置网络[uuid:{0}](在网关[uuid:{1}]中):{2} @@ -313,8 +474,85 @@ please\ set\ the\ correct\ priceUserConfig,\ for\ example\:\ priceUserConfig\:{\ please\ set\ the\ correct\ priceUserConfig,\ for\ example\:\ priceUserConfig\:{\\nvolume\:{\\npriceKeyName\:\\"priceKeyName\\"}} = 请设置正确的priceUserConfig,例如:priceUserConfig:'{\nVolume:{\nPriceKeyName:\“ priceKeyName\”}'} unsupported\ billing\ resource\ 
type\ [%s] = 不支持的计费资源类型[{0}] +# In Module: block-primary-storage +primaryStorageUuid\ is\ mandatory\ when\ download\ image\ cache = +the\ block\ primary\ storage[uuid\:%s,\ name\:%s]\ can\ not\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = 块主存储[uuid:{0}, name:{1}] 在附加的集群中找不到可用于实例化卷的可用主机 +fail\ to\ find\ a\ host\ to\ map\ for\ volume\ %s = +fail\ to\ find\ install\ path\ for\ downloading\ volume\:\ %s,\ please\ prepare\ it\ before\ downloading = +fail\ to\ find\ a\ host\ to\ download\ volume\ %s = +Fail\ to\ get\ host\ initiator\ ref,\ please\ reconnect\ this\ host\:%s = +Block\ primary[uuid\:\ %s]\ has\ not\ attached\ to\ any\ clusters = +Fail\ to\ connect\ block\ primary[uuid\:\ %s],\ because\ no\ connected\ host = +Failed\ to\ attach\ block\ primary[uuid\:\ %s]\ to\ cluster[uuid\:\ %],\ because\ no\ connected\ host\ in\ cluster = +backing\ up\ snapshots\ to\ backup\ storage\ is\ a\ depreciated\ feature,\ which\ doesn't\ support\ on\ block\ primary\ storage = +fail\ to\ find\ cluster\ for\ commit\ volume\ on\ ps\:%s = +fail\ to\ find\ host\ for\ commit\ volume\:%s = +KVM\ host[uuid\:\ %s]\ fails\ to\ be\ added\ into\ block\ primary\ storage[uuid\:\ %s],\ %s = +iso[uuid\:\ %s]\ is\ attached\ to\ vm[uuid\:\ ],\ but\ iso\ is\ not\ on\ any\ block\ storage,\ you\ have\ to\ detach\ it,\ before\ migrate\ vm = +fail\ to\ find\ block\ scsi\ lun\ for\ volume\:\ %s = +fail\ to\ exchange\ block\ scsi\ lun\ info\:%s = +fail\ to\ clean\ up\ after\ detach\ volume = +not\ support\ take\ volumes\ snapshots\ on\ multiple\ ps\ when\ including\ ceph = 当包含 Ceph 时,不支持在多个主存储上同时对卷进行快照 +fail\ to\ map\ lun\ to\ host\ before\ attach\ volume\ to\ vm = +primary\ storage\ uuid\ is\ mandatory\ when\ delete\ lun = +initiatorName\ %s\ is\ occupied\ by\ other\ host,\ please\ regenerate\ initiator\ of\ host\ %s = +failed\ to\ download[%s]\ from\ BackupStorage[hostname\:%s]\ to\ block\ primary\ storage[uuid\:%s,\ path\:%s],\ %s = +can\ not\ execute\ map\ lun\ 
to\ host\ flow,\ because\ backend\ device\ is\ null = +can\ not\ execute\ map\ lun\ to\ host\ flow,\ because\ ps\ host\ ref\ is\ null = +can\ not\ execute\ map\ lun\ to\ host\ flow,\ because\ ps\ host\ ref\ metadata\ is\ empty = +can\ not\ execute\ map\ lun\ to\ host\ flow,\ invalid\ lun\ id = +can\ not\ execute\ map\ lun\ to\ host\ flow,\ invalid\ lun\ lun\ type = +fail\ to\ sync\ access\ zones\ because\ %s = +fail\ to\ get\ access\ zone's\ subnet\ because\ %s = +fail\ to\ query\ all\ hosts,\ because\ of\ %s = +fail\ to\ query\ hosts\ %s,\ because\ of\ %s = +fail\ to\ add\ host\ %s\ into\ hostGroup\ %s,\ because\ of\ %s = +fail\ to\ delete\ host\ %s,\ because\ of\ %s = +fail\ to\ delete\ host\ group\ %s,\ because\ of\ %s = +host\ id\ is\ mandatory\ but\ get\:%s = +fail\ to\ delete\ initiator\ %s,\ because\ of\ %s = +fail\ to\ query\ host\ group,\ because\ of\ %s = +fail\ to\ add\ host\ group\:\ %s,\ error\ message\:%s\ = +fail\ to\ query\ lun\ \:\ %s,\ error\ message\:%s\ = +fail\ to\ query\ lun\ by\ path\:\ %s,\ error\ message\:%s\ = +fail\ to\ update\ lun\ name\:\ %s,\ error\ message\:%s\ = +fail\ to\ create\ lun\ name\:\ %s,\ can\ not\ find\ root\ cause = +unable\ to\ do\ the\ operation\ because\ the\ lun[%s]\ has\ been\ occupied = +fail\ to\ create\ lun\ name\:\ %s,\ error\ message\:%s\ = +fail\ to\ query\ lun\ %s,\ because\ of\ %s = +lun\ id\ is\ mandatory\ when\ query\ lun\ map = +fail\ to\ query\ lun\ map\ for\ host\ group\ %s,\ because\ of\ %s = +fail\ to\ query\ lun\ map\ %s,\ because\ of\ %s = +fail\ to\ get\ cluster\ info,\ because\ of\ %s = +fail\ to\ map\ lun\ %s\ to\ host\ group\ %s,\ because\ of\ %s = +lun\ map\ id\ is\ mandatory\ but\ get\:%s = +fail\ to\ delete\ lun\ map\ %s,\ because\ of\ %s = +lun\ id\ is\ mandatory\ but\ get\:%s = +fail\ to\ delete\ lun\ %s,\ because\ of\ %s = +fail\ to\ get\ storage\ pool\ %s,\ because\ of\ %s = +fail\ to\ create\ snapshot\ for\ lun\ %s,\ because\ of\ %s = +fail\ to\ query\ snapshots\ %s,\ because\ of\ %s = 
+snapshot\ id\ is\ mandatory\ but\ get\:%s = +fail\ to\ delete\ snapshot\ %s,\ because\ of\ %s = +fail\ to\ revert\ snapshot\:%s,\ because\ of\:\ %s = +fail\ to\ check\ lun\ %s\ session\ state\ ,\ because\ of\:\ %s = +fail\ to\ get\ lun\ %s\ maps,\ because\ of\:\ %s = +fail\ to\ get\ lun\ %s\ remain\ created\ lun\ number,\ because\ of\:\ %s = +There\ is\ no\ way\ to\ get\ lun\ map\ id,\ we\ just\ return\ as\ failure = +lun\ map\ id\ is\ mandatory\ can\ not\ be\ null,\ neither\ 0 = +lun\ id\ is\ illegal = +fail\ to\ add\ host\ group\:\ %s = +fail\ to\ add\ host\:\ %s = +lun\ can\ not\ be\ found = +XStor\ cluster\ is\ unhealthy,\ cluster\ info[cluster_data_state\:\ %s,\ cluster_healthy_state\:\ %s,\ cluster_running_state\:\ %s] = +illegal\ lun\ id = +fail\ to\ get\ image\ cache\ lun\ info = + # In Module: cbd +invalid\ mdsUrl[%s],\ the\ sshUsername\:sshPassword\ part\ is\ invalid.\ A\ valid\ mdsUrl\ is\ in\ format\ of\ %s = invalid\ mdsUrl[%s].\ SSH\ username\ and\ password\ must\ be\ separated\ by\ '\:'\ and\ cannot\ be\ empty.\ A\ valid\ monUrl\ format\ is\ %s = +invalid\ mdsUrl[%s],\ hostname\ cannot\ be\ null.\ A\ valid\ mdsUrl\ is\ in\ format\ of\ %s = +invalid\ mdsUrl[%s],\ the\ ssh\ port\ is\ greater\ than\ 65535\ or\ smaller\ than\ 1.\ A\ valid\ mdsUrl\ is\ in\ format\ of\ %s = # In Module: cbt Cbt\ task\ not\ found[uuid\:\ %s] = @@ -375,6 +613,8 @@ No\ CDP\ backup\ storage\ found\ for\ VM\:\ %s = 未找到虚拟机{0}的CDP备 No\ CdpBackupFactory\ of\ type[%s]\ found = 未找到类型为[{0}]的CDPBackupFactory CDP\ task[uuid\:\ %s]\ not\ found = 未找到CDP任务[uuid:{0}] unexpected\ task\ type\:\ %s = 意外的任务类型:{0} +The\ operation\ has\ volume[uuid\:\ %s]\ that\ will\ take\ chain\ type\ snapshot.\ Therefore,\ you\ could\ not\ do\ this\ operation\ when\ a\ CDP\ task\ is\ running\ on\ the\ VM\ instance. 
= +Could\ not\ attach\ volume.The\ VM\ instance\ is\ running\ a\ CDP\ task.\ After\ the\ volume\ is\ attached,\ the\ capacity\ required\ for\ full\ backup\ will\ exceed\ the\ CDP\ task\ planned\ size.\ Please\ plan\ the\ size\ properly\ and\ try\ again. = The\ VM[%s]\ for\ volume[%s]\ is\ running\ CDP,\ cannot\ resize\ now. = 卷[{1}]的VM[{0}]正在运行CDP,现在无法调整大小。 No\ VM\ found\ for\ CDP\ task[uuid\:\ %s] = 未找到CDP任务[uuid:{0}]的VM BackupStorage[uuid\:\ %s]\ already\ been\ deleted = BackupStorage[uuid:{0}]已删除 @@ -393,6 +633,7 @@ unexpected\ volume[uuid\:\ %s]\ size\:\ %d = 意外卷[uuid:{0}]大小:{1} resize\ volume[uuid\:\ %s]\ failed\:\ %s = 调整卷[uuid:{0}]大小失败:{1} volume[uuid\:\ %s]\ has\ unexpected\ path\:\ %s = 卷[uuid:{0}]具有意外路径:{1} Available\ License\ not\ found,\ please\ apply\ addon\ license\ for\ product\ CDP. = +Insufficient\ CDP\ VM\ number\ licensed.\ Your\ license\ permits\ %d\ CDP\ VM,\ there\ are\ %d\ CDP\ VM\ used.\ You\ can\ stop\ or\ disable\ some\ CDP\ tasks\ or\ apply\ a\ new\ license. = kvmagent\ restarted = KVMAGENT重新启动 kvmagent\ no\ response\ %d\ times = KVMAgent无响应{0}次 recoverVm\:\ host\ uuid\ is\ not\ provided\ and\ original\ host\ is\ not\ found\ for\ VM[uuid\:\ %s] = RecoverVM:未提供物理机uuid,并且未找到VM[uuid:{0}]的原始物理机 @@ -419,20 +660,28 @@ The\ problem\ may\ be\ caused\ by\ an\ incorrect\ user\ name\ or\ password\ or\ all\ ceph\ mons\ are\ Disconnected\ in\ ceph\ backup\ storage[uuid\:%s] = 所有在Ceph镜像服务器监控节点[uuid:{0}]的监控节点都处于失联状态 CephMon[hostname\:%s]\ not\ found\ on\ backup\ storage[uuid\:%s] = 在备份存储[uuid:{1}]上找不到cephmon[物理机名:{0}] unable\ to\ connect\ to\ the\ ceph\ backup\ storage[uuid\:%s],\ failed\ to\ connect\ all\ ceph\ monitors. 
= +there\ is\ another\ CEPH\ backup\ storage[name\:%s,\ uuid\:%s]\ with\ the\ same\ FSID[%s],\ you\ cannot\ add\ the\ same\ CEPH\ setup\ as\ two\ different\ backup\ storage = image[uuid\:\ %s]\ is\ not\ on\ backup\ storage[uuid\:%s,\ name\:%s] = 镜像[uuid:{0}]不在备份存储[uuid:{1},名称:{2}]上 unable\ to\ add\ mon\ to\ ceph\ backup\ storage = ceph\ backup\ storage\ do\ not\ support\ calculate\ image\ hash = +cannot\ update\ status\ of\ the\ ceph\ backup\ storage\ mon[uuid\:%s],\ it\ has\ been\ deleted.This\ error\ can\ be\ ignored = Ceph\ bs[uuid\=%s]\ pool\ name\ not\ found = 找不到Ceph BS[uuid={0}]池名称 delete\ volume\ chain\ error,\ continue\ to\ delete = +the\ backup\ storage[uuid\:%s,\ name\:%s,\ fsid\:%s]\ is\ not\ in\ the\ same\ ceph\ cluster\ with\ the\ primary\ storage[uuid\:%s,\ name\:%s,\ fsid\:%s] = fsid\ is\ not\ same\ between\ ps[%s]\ and\ bs[%s],\ create\ template\ is\ forbidden. = 在主存储和镜像服务器中fsid不是一样的,禁止创建模版。 all\ monitors\ cannot\ execute\ http\ call[%s] = 所有的监控节点都无法执行http call[{0}] +unable\ to\ connect\ to\ the\ ceph\ primary\ storage[uuid\:%s],\ failed\ to\ connect\ all\ ceph\ monitors. = ceph\ primary\ storage[uuid\:%s]\ may\ have\ been\ deleted. = 分布式存储[uuid:{0}]可能已经被删除 the\ fsid\ returned\ by\ mons\ are\ mismatching,\ it\ seems\ the\ mons\ belong\ to\ different\ ceph\ clusters\:\\n = 监控节点返回的fsid不匹配,似乎监控节点属于不同的ceph集群 +there\ is\ another\ CEPH\ primary\ storage[name\:%s,\ uuid\:%s]\ with\ the\ same\ FSID[%s],\ you\ cannot\ add\ the\ same\ CEPH\ setup\ as\ two\ different\ primary\ storage = +the\ ceph\ primary\ storage[uuid\:%s,\ name\:%s]\ is\ down,\ as\ one\ mon[uuid\:%s]\ reports\ an\ operation\ failure[%s] = unable\ to\ connect\ mons = unable\ to\ add\ mon\ to\ ceph\ primary\ storage = +the\ mon[ip\:%s]\ returns\ a\ fsid[%s]\ different\ from\ the\ current\ fsid[%s]\ of\ the\ cep\ cluster,are\ you\ adding\ a\ mon\ not\ belonging\ to\ current\ cluster\ mistakenly? 
= operation\ error,\ because\:\ failed\ to\ get\ response = 操作错误,原因:无法获取响应 backing\ up\ snapshots\ to\ backup\ storage\ is\ a\ depreciated\ feature,\ which\ will\ be\ removed\ in\ future\ version = 将快照备份到备份存储是一项过时的功能,在未来版本中将被删除 cannot\ reinit\ rootvolume\ [%s]\ because\ image\ [%s]\ has\ been\ deleted\ and\ imagecache\ cannot\ be\ found = 无法重新初始化RootVolume[{0}],因为镜像[{1}]已被删除并且找不到ImageCache +cannot\ find\ backupstorage\ to\ download\ image\ [%s]\ to\ primarystorage\ [%s]\ due\ to\ lack\ of\ Ready\ and\ accessible\ image = allocated\ url\ not\ found = 未找到分配的URL invalid\ allocated\ url\:%s = 分配的URL无效:{0} cannot\ find\ any\ Connected\ ceph\ mon\ for\ the\ primary\ storage[uuid\:%s] = 无法为分布式存储[uuid:{0}]找到一台处于Connected状态的的监控节点 @@ -443,7 +692,10 @@ rootVolume[%s]\ is\ already\ in\ use(ceph\ rbd\ image[%s]\ already\ has\ watcher cannot\ find\ cephPrimaryStorage\ pool[poolName\=%s] = 找不到CephPrimaryStorage池[PoolName={0}] cephPrimaryStorage\ pool[poolName\=%s]\ available\ virtual\ capacity\ not\ enough\ for\ size\ %s = CephPrimary存储池[PoolName={0}]可用虚拟容量不足,无法满足大小{1} cannot\ allocate\ pool\ for\ primaryStorage[%s],\ purpose\:\ %s = 无法为主存储[{0}]分配池,目的:{1} +cannot\ update\ status\ of\ the\ ceph\ primary\ storage\ mon[uuid\:%s],\ it\ has\ been\ deleted.This\ error\ can\ be\ ignored = Ceph\ ps[uuid\=%s]\ root\ pool\ name\ not\ found = 找不到Ceph PS[uuid={0}]根池名称 +invalid\ uri,\ correct\ example\ is\ ceph\://$POOLNAME/$VOLUMEUUID\ or\ volume\://$VOLUMEUUID\ or\ volumeSnapshotReuse\://$SNAPSHOTUUID = +required\ ceph\ pool[uuid\:%s]\ cannot\ satisfy\ conditions\ [availableSize\ >\ %s\ bytes],\ current\ available\ size\ %s = cannot\ find\ ceph\ pool\ [%s]\ related\ osdgroup = 找不到Ceph池[{0}]相关的OSDGroup # In Module: cloudformation @@ -480,6 +732,7 @@ Some\ actions\ are\ invalid = 某些操作无效 no\ root\ element\ found,\ please\ check\ your\ cfn\ formation! = 找不到根元素,请检查您的CFN结构! 
Wrong\ json\ format,\ causes\:\ %s = 错误的JSON格式,导致:{0} CfnRootDecoder's\ weight\ must\ between\ 0-100,\ 0\ means\ decode\ first,\ default\ is\ 50 = cfnRootDecoder的权重必须介于0-100之间,0表示先解码,默认值为50 +Condition\ key\:\ %s\ only\ support\ 1\ element\ in\ the\ json\ object\ of\ value,\ but\ got\ %d\ elements! = Value\ must\ be\ boolean\ in\ 'Condition'\ field = “条件”字段中的值必须为布尔值 Only\ support\ ZStack\ Template\ Functions\ in\ 'Condition'\ field! = 仅支持“条件”字段中的ZStack模板函数! Condition\ body\ cannot\ support\ json\ null\ or\ array! = 条件体不支持JSON NULL或数组! @@ -509,6 +762,7 @@ element\ is\ null! = 元素为空! # In Module: compute cannot\ find\ root\ volume\ of\ vm[uuid\:%s] = 找不到 VM[uuid:{0}] 的根硬盘 +the\ backup\ storage[uuid\:%s,\ type\:%s]\ requires\ bound\ primary\ storage,\ however,\ the\ primary\ storage\ has\ not\ been\ added = No\ host\ with\ %s\ found = 找不到具有 {0} 的主机 either\ volumeUuid\ or\ volumeSnapshotUuid\ must\ be\ set = 硬盘 UUID 或快照 UUID 需要被设置 zoneUuids,\ clusterUuids,\ hostUuids\ must\ at\ least\ have\ one\ be\ none-empty\ list,\ or\ all\ is\ set\ to\ true = 数据中心 UUID、集群 UUID、主机 UUID 至少有一项不为空,或者 ''all'' 字段设置为 true @@ -528,16 +782,19 @@ webssh\ server\ is\ not\ running. 
= WebSSH 服务器未运行 there\ has\ been\ a\ host\ having\ managementIp[%s] = 已经存在一个管理 IP 是 {0} 的主机 managementIp[%s]\ is\ neither\ an\ IPv4\ address\ nor\ a\ valid\ hostname = 管理 IP[{0}] 既不是有效的 IPv4 地址也不是有效的主机名 can\ not\ maintain\ host[uuid\:%s,\ status\:%s]which\ is\ not\ Connected = 只能对已连接状态的主机 [uuid:{0}, status:{1}] 进行维护操作 +the\ password\ for\ the\ physical\ machine\ [%s]\ is\ empty.\ please\ set\ a\ password = path\ cannot\ be\ empty = path\ must\ be\ an\ absolute\ path\ (start\ with\ '/')\\" = invalid\ path\ traversal\ detected = mountPoint\ cannot\ be\ empty = mount\ point\ must\ be\ an\ absolute\ path\ (start\ with\ '/') = path\ traversal\ detected\ in\ mount\ point = +the\ mount\ point\ must\ strictly\ follow\ the\ security\ pattern\:\ '^[a-zA-Z0-9_\\-./]+$'.\ this\ requires\:\ \\n1.\ only\ alphanumeric\ characters\ [a-z,\ A-Z,\ 0-9]\\n2.\ limited\ special\ characters\:\ hyphen\ (-),\ underscore\ (_),\ period\ (.),\ and\ forward\ slash\ (/)\\n3.\ must\ be\ a\ valid\ absolute\ path\ starting\ with\ '/'\\n\\nvalid\ examples\:\\n\ \ /mnt/data\\n\ \ /volumes/drive01\\n\ \ /backup-2023.disk\\n\\ninvalid\ value\ detected\:\ '%s' = mountPoint\ should\ not\ end\ with\ '/'\ except\ root\ directory = host[uuid\:%s,\ name\:%s]\ is\ in\ status[%s],\ cannot\ perform\ required\ operation = unable\ to\ do\ the\ operation\ because\ the\ host\ is\ in\ status\ of\ Disconnected = 主机失联,无法进行操作 host[uuid\:%s,\ name\:%s]\ is\ in\ state[%s],\ cannot\ perform\ required\ operation = 主机 [uuid:{0}, name:{1}] 处于状态 [{2}] 中,不能处理该请求 +host[%s]\ does\ not\ have\ ipmi\ device\ or\ ipmi\ does\ not\ have\ address.After\ config\ ipmi\ address,\ please\ reconnect\ host\ to\ refresh\ host\ ipmi\ information = Host[%s]\ is\ in\ maintenance\ state,\ VM\ on\ this\ host\ should\ be\ migrated = failed\ to\ migrate\ vm[uuids\:%s]\ on\ host[uuid\:%s,\ name\:%s,\ ip\:%s],\ will\ try\ stopping\ it. 
= 无法迁移主机 [uuid:{1},名称:{2},IP:{3}] 上的虚拟机 [uuid:{0}],将尝试停止它 host\ is\ connecting,\ ping\ failed = 主机正在连接, 不能进行 ping 操作 @@ -555,12 +812,14 @@ cannot\ find\ host[uuid\:%s],\ it\ may\ have\ been\ deleted = cluster[uuid\:%s]\ is\ not\ existing = 集群 {0} 不存在 after\ connecting,\ host[name\:%s,\ ip\:%s]\ returns\ a\ null\ architecture = 内部错误: 重连后, 主机 {0} [地址为 {1}] 获取不到架构数据 cluster[uuid\:%s]'s\ architecture\ is\ %s,\ not\ match\ the\ host[name\:%s,\ ip\:%s]\ architecture\ %s = 集群 {0} 的架构为 {1}, 与主机 [名称 {2}; IP {3}] 的架构 {4} 不匹配 +failed\ to\ get\ disk\ devices,\ because\ [stderr\:%s,\ stdout\:%s,\ exitErrorMessage\:%s] = mountPoint\ %s\ is\ already\ mount\ on\ device\ %s = device\ %s\ is\ already\ mount\ on\ mountPoint\ %s = failed\ to\ get\ UUID\ for\ device\ %s = no\ running\ api[%s]\ task\ on\ hosts = 主机上没有 API {0} 任务在运行 primary\ storage[uuid\:%s]\ becomes\ disconnected,\ the\ host\ has\ no\ connected\ primary\ storage\ attached = 数据存储 {0} 失联, 该主机已经没有可用的数据存储了 current\ vm\ instance\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s],\ allowed\ states\ are\ %s = +cpu\ topology\ is\ not\ correct,\ cpuNum[%s],\ configured\ cpuSockets[%s],\ cpuCores[%s],\ cpuThreads[%s];\ Calculated\ cpuSockets[%s],\ cpuCores[%s],\ cpuThreads[%s] = the\ host[uuid\:%s]\ is\ not\ connected = 主机机 {0} 未连接 VM[uuid\:%s]\ has\ attached\ ISO[uuid\:%s] = 虚拟机 {0} 已经加载了 ISO {1} All\ vm[uuid\:%s]\ CD-ROMs\ have\ mounted\ ISO = 虚拟机 {0} 的所有 CD-ROM 都已装载了 ISO @@ -580,6 +839,7 @@ creation\ rely\ on\ image\ cache[uuid\:%s,\ locate\ ps\ uuids\:\ [%s]],\ cannot\ failed\ to\ allocate\ root\ volume\ to\ the\ primary\ storage[%s] = \ Can\ not\ find\ the\ vm's\ host,\ please\ start\ the\ vm[%s],\ then\ mount\ the\ disk = 未找到虚拟机所在的主机,请手动重启虚拟机[{0}]并手动挂载硬盘 null\ state\ of\ the\ vm[uuid\:%s]\ on\ the\ host[uuid\:%s] = +cannot\ find\ the\ iso[uuid\:%s]\ in\ any\ connected\ backup\ storage\ attached\ to\ the\ zone[uuid\:%s].\ check\ below\:\\n1.\ if\ the\ backup\ storage\ is\ attached\ to\ the\ zone\ where\ the\ VM[name\:\ 
%s,\ uuid\:%s]\ is\ running\\n2.\ if\ the\ backup\ storage\ is\ in\ connected\ status,\ if\ not,\ try\ reconnecting\ it = hostname\ is\ empty = %s\ is\ not\ a\ valid\ Windows\ NetBIOS\ hostname = %s\ is\ a\ reserved\ Windows\ NetBIOS\ hostname = @@ -599,6 +859,7 @@ unable\ to\ change\ to\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ already\ a unable\ to\ change\ to\ a\ non-guest\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ already\ attached\ to\ the\ vm[uuid\:\ %s] = 无法更改为非来宾分布式端口组。分布式端口组[uuid:{0}]已挂载到虚拟机[uuid:{1}] unable\ to\ change\ to\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ disabled = 无法更改为分布式端口组。分布式端口组[uuid:{0}]已禁用 unable\ to\ change\ to\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ a\ system\ network\ and\ vm\ is\ a\ user\ vm = 无法更改为分布式端口组。分布式端口组[uuid:{0}]是系统网络,但虚拟机是一个用户虚拟机 +unable\ to\ change\ to\ L3\ network[uuid\:%s]\ whose\ l2Network\ is\ not\ attached\ to\ the\ host[uuid\:%s] = the\ image[name\:%s,\ uuid\:%s]\ is\ an\ ISO,\ rootDiskSize\ must\ be\ set = 镜像[名称:{0}, uuid:{1}]是一个ISO, 必须设置硬盘大小 Can\ not\ create\ CD-ROM\ for\ vm[uuid\:%s]\ which\ is\ in\ state[%s]\ = 无法为处于状态[{1}]的虚拟机[uuid:{0}]创建CD-ROM Current\ platform\ %s\ not\ support\ update\ nic\ driver\ yet = 当前平台{0}尚不支持更新网卡驱动类型 @@ -620,6 +881,7 @@ ipv4\ address\ cannot\ be\ empty\ when\ l3\ is\ IPAM\ enabled = ipv6\ address\ cannot\ be\ empty\ when\ l3\ is\ IPAM\ enabled = ipv6\ prefix\ must\ be\ a\ number,\ but\ got\ [%s] = the\ VM[uuid\:%s]\ has\ no\ nic\ on\ the\ L3\ network[uuid\:%s] = 虚拟机[uuid:{0}]在分布式端口组[uuid:{1}]上没有任何网卡 +could\ not\ delete\ static\ ip\ [%s]\ for\ vm\ [uuid\:%s]\ because\ it\ does\ not\ exist = dns[%s]\ should\ be\ ipv%s\ address = DNS[{0}]应该是ipv{1}地址 size\ of\ dns\ list\ should\ not\ exceed\ 3 = DNS列表的大小不能超过3 vmNicUuid\ should\ be\ set\ for\ Windows\ vm = Windows虚拟机的网卡UUID应被设置 @@ -643,6 +905,7 @@ unable\ to\ attach\ a\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ already\ at unable\ to\ attach\ a\ non-guest\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ already\ 
attached\ to\ the\ vm[uuid\:\ %s] = 无法挂载非来宾分布式端口组。分布式端口组[uuid:{0}]已挂载到虚拟机[uuid:{1}] unable\ to\ attach\ a\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ disabled = 无法挂载分布式端口组,分布式端口组[uuid:{0}]已禁用 unable\ to\ attach\ a\ L3\ network.\ The\ L3\ network[uuid\:%s]\ is\ a\ system\ network\ and\ vm\ is\ a\ user\ vm = 无法挂载分布式端口组。分布式端口组[uuid:{0}]是系统网络,但虚拟机是一个用户虚拟机 +unable\ to\ attach\ L3\ network[uuid\:%s]\ to\ VM[uuid\:%s]\ whose\ l2Network\ is\ not\ attached\ to\ the\ host[uuid\:%s] = unable\ to\ attach\ the\ nic.\ The\ vm[uuid\:\ %s]\ is\ not\ Running\ or\ Stopped;\ the\ current\ state\ is\ %s = 无法挂载网卡。虚拟机[uuid: {0}]既不在运行也不在停止状态。当前状态为{1} unable\ to\ attach\ the\ nic.\ The\ nic\ has\ been\ attached\ with\ vm[uuid\:\ %s] = 无法挂载网卡。网卡已被挂载到虚拟机[uuid:{0}] unable\ to\ attach\ the\ nic.\ Its\ L3\ network[uuid\:%s]\ is\ already\ attached\ to\ the\ vm[uuid\:\ %s] = 无法挂载网卡。其分布式端口组[uuid:{0}]已挂载到虚拟机[uuid:{1}] @@ -656,7 +919,6 @@ unable\ to\ detach\ a\ L3\ network.\ The\ vm[uuid\:\ %s]\ is\ not\ Running\ or\ vm[uuid\:%s]\ can\ only\ attach\ volume\ when\ state\ is\ Running\ or\ Stopped,\ current\ state\ is\ %s = 虚拟机[uuid:{0}]只能在运行或者停止状态时挂载盘,当前状态为{1} image\ mediaType\ is\ ISO\ but\ missing\ root\ disk\ settings = 镜像媒体类型为ISO,但缺少根磁盘设置 Unexpected\ root\ disk\ settings = 意外的根磁盘设置 -the\ primary\ storage[%s]\ of\ the\ root\ volume\ and\ the\ primary\ storage[%s]\ of\ the\ data\ volume\ are\ not\ in\ the\ same\ cluster = 根卷的主存储[{0}]和数据硬盘的主存储[{1}]不在同一集群中 Unexpected\ data\ disk\ settings.\ dataDiskSizes\ need\ to\ be\ greater\ than\ 0 = 意外的数据磁盘设置。数据磁盘大小需要大于0 missing\ root\ disk = 缺少根磁盘设置 virtio\ tag\ is\ not\ allowed\ when\ virtio\ is\ false = 当virtio为false时,不允许使用virtio标签 @@ -685,6 +947,7 @@ failed\ to\ delete\ templated\ vm\ [%s] = failed\ to\ delete\ the\ cache\ vmInstance[uuid\:%s]\ of\ templated\ vmInstance[uuid\:%s] = VM[uuid\:%s]\ state\ is\ not\ Running. 
= VM[uuid:{0}]状态未运行 no\ available\ empty\ cdrom\ for\ VM[uuid\:%s] = +the\ ISO[uuid\:%s]\ is\ on\ backup\ storage\ that\ is\ not\ compatible\ of\ the\ primary\ storage[uuid\:%s]\ where\ the\ VM[name\:%s,\ uuid\:%s]\ is\ on = failed\ to\ update\ vm[uuid\=%s]\ on\ hypervisor. = 更新虚拟机 {0} 失败 Failed\ to\ update\ vm[uuid\=%s]\ on\ hypervisor\:\ The\ modification\ of\ some\ properties\ failed = the\ vm\ with\ the\ name\ [%s]\ already\ exists = @@ -693,11 +956,15 @@ failed\ to\ update\ vm[uuid\=%s]\ on\ hypervisor\:\ The\ modification\ of\ some\ ISO[uuid\:%s]\ is\ not\ attached\ to\ VM[uuid\:%s] = ISO[uuid:{0}]未被加载到虚拟机[uuid:{1}] Detaching\ volume\ is\ not\ allowed\ when\ VM[uuid\=%s]\ is\ in\ state[%s] = failed\ to\ detach\ volume[uuid\=%s]\ of\ VM[uuid\=%s] = 虚拟机 {1} 删除卷 {0} 失败 +Unable\ to\ find\ L3Network[uuid\:%s]\ to\ start\ the\ current\ vm,\ it\ may\ have\ been\ deleted,\ Operation\ suggestion\:\ delete\ this\ vm,\ recreate\ a\ new\ vm = One\ vm\ cannot\ create\ %s\ CDROMs,\ vm\ can\ only\ add\ %s\ CDROMs = 一个VM无法创建{0}个CDROM,VM只能添加{1}个CDROM failed\ to\ start\ VM[uuid\:%s] = no\ way\ to\ get\ image\ size\ of\ %s,\ report\ exception. = 无法获取{0}的镜像大小 VM[uuid\:%s]\ can\ only\ add\ %s\ CDROMs = VM[uuid:{0}]只能添加{1}个CDROM update\ vm[%s]\ priority\ to\ [%s]\ failed = 将虚拟机[{0}]的优先级更新为[{1}]失败 +unable\ to\ reset\ volume[uuid\:%s]\ to\ origin\ image[uuid\:%s],\ the\ vm[uuid\:%s]\ volume\ attached\ to\ is\ not\ in\ Stopped\ state,\ current\ state\ is\ %s = +unable\ to\ reset\ volume[uuid\:%s]\ to\ origin\ image[uuid\:%s],\ cannot\ find\ image\ cache. 
= +unable\ to\ reset\ volume[uuid\:%s]\ to\ origin\ image[uuid\:%s],\ for\ image\ type\ is\ ISO = VmInstanceStartNewCreatedVmExtensionPoint[%s]\ refuses\ to\ create\ vm[uuid\:%s] = VmInstanceStartNewCreatedVmExtensionPoint[{0}] 拒绝创建虚拟机 [uuid:{1}] VmInstanceStopVmExtensionPoint[%s]\ refuses\ to\ stop\ vm[uuid\:%s] = VmInstanceStartNewCreatedVmExtensionPoint[{0}] 拒绝停止虚拟机 [uuid:{1}] VmInstanceRebootExtensionPoint[%s]\ refuses\ to\ reboot\ vm[uuid\:%s] = VmInstanceRebootExtensionPoint[{0}] 拒绝重启虚拟机[uuid:{1}] @@ -732,6 +999,7 @@ handle\ system\ tag\ fail\ when\ creating\ vm = 在创建虚拟机时处理系 handle\ sshkeypair\ fail\ when\ creating\ vm = unable\ to\ enable\ this\ function.\ There\ are\ multi\ nics\ of\ L3\ network[uuid\:%s]\ in\ the\ vm[uuid\:\ %s] = 无法启用此功能。虚拟机[uuid:{1}]中存在多个分布式端口组[uuid:{0}]的NIC only\ one\ hostname\ system\ tag\ is\ allowed,\ but\ %s\ got = 只允许通过系统标签设置一个主机名,但是实际上有{0} +conflict\ hostname\ in\ system\ tag[%s];\ there\ has\ been\ a\ VM[uuid\:%s]\ having\ hostname[%s]\ on\ L3\ network[uuid\:%s] = invalid\ boot\ device[%s]\ in\ boot\ order[%s] = 在引导顺序[{1}]中存在无效的引导设备[{0}] cpuSockets\ must\ be\ an\ integer = CPUSockets必须为整数 cpuCores\ must\ be\ an\ integer = cpucores必须为整数 @@ -750,8 +1018,12 @@ invalid\ securityElementEnable[%s],\ %s\ is\ not\ boolean\ class = SecurityEleme invalid\ usbRedirect[%s],\ %s\ is\ not\ usbRedirect\ tag = usbRedirect[{0}]无效,{1}不是usbRedirect标记 invalid\ usbRedirect[%s],\ %s\ is\ not\ boolean\ class = usbRedirect[{0}]无效,{1}不是布尔类 rootDiskOfferingUuid\ cannot\ be\ null\ when\ create\ vm\ without\ image = 在不使用镜像的情况下创建VM时,RootDiskOfferInGuuid不能为空 +the\ resource[uuid\:%s]\ is\ a\ ROOT\ volume,\ you\ cannot\ change\ its\ owner,\ instead,change\ the\ owner\ of\ the\ VM\ the\ root\ volume\ belongs\ to = failed\ to\ find\ host\ of\ vm[uuid\=%s] = +Failed\ to\ instantiate\ volume.\ Because\ vm's\ host[uuid\:\ %s]\ and\ allocated\ primary\ storage[uuid\:\ %s]\ is\ not\ connected. 
= +the\ diskAO\ parameter\ is\ incorrect.\ need\ to\ set\ one\ of\ the\ following\ properties,\ and\ can\ only\ be\ one\ of\ them\:\ size,\ templateUuid,\ diskOfferingUuid,\ sourceUuid-sourceType = the\ disk\ does\ not\ support\ attachment.\ disk\ type\ is\ %s = +vm\ current\ state[%s],\ modify\ virtio\ requires\ the\ vm\ state[%s] = duplicate\ nic\ params = 复制NIC参数 could\ not\ create\ multi\ SR-IOV\ enabled\ nics\ on\ the\ same\ l3\ network = l3NetworkUuid\ of\ vm\ nic\ can\ not\ be\ null = 虚拟机NIC的L3Networkuuid不能为空 @@ -805,10 +1077,12 @@ Already\ have\ one\ userdata\ systemTag\ for\ diskOffering[uuid\:\ %s]. = DiskOf Shouldn't\ be\ more\ than\ one\ systemTag\ for\ one\ instanceOffering. = 对于一个实例提供,不应超过一个系统标记。 # In Module: console +the\ console\ agent\ is\ not\ connected;\ it's\ mostly\ like\ the\ management\ node\ just\ starts,\ please\ wait\ for\ the\ console\ agent\ connected,\ or\ you\ can\ reconnect\ it\ manually\ if\ disconnected\ for\ a\ long\ time. = cannot\ find\ host\ IP\ of\ the\ vm[uuid\:%s],\ is\ the\ vm\ running??? = 无法找到vm[uuid:{0}]的物理机IP,请确认该vm是否在运行??? vm[uuid\:%s]\ is\ not\ in\ state\ of\ %s,\ current\ state\ is\ %s = establish\ VNC\:\ unexpected\ uri\:\ %s = 建立VNC:意外的URI:{0} unable\ to\ check\ console\ proxy\ availability,\ because\ %s = 无法检查控制台代理是否可用,因为{0} +console\ proxy[uuid\:\ %s,\ status\:\ %s]\ on\ agent[ip\:\ %s]\ is\ not\ Connected,\ fail\ to\ delete\ it = Ansible\ private\ key\ not\ found. = 找不到Ansible私钥。 invalid\ management\ node\ UUID[%s] = there\ is\ other\ process\ using\ the\ port\:\ %s = @@ -851,6 +1125,7 @@ service[%s]\ is\ not\ running = 服务[{0}]未运行 cannot\ trigger\ a\ finished\ GC\ job[uuid\:%s,\ name\:%s] = 无法触发一个完成过的GC任务 management\ node[id\:%s]\ becomes\ unavailable,\ job[name\:%s,\ id\:%s]\ is\ not\ restartable = unknown\ product\ plugin\ name\:\ %s = +plugin[%s]\ name,\ productKey\ and\ vendor\ cannot\ be\ null = 插件[{0}]名称、产品密钥和供应商不能为空 parameter\ apiId[%s]\ is\ not\ a\ valid\ uuid. 
= 参数apiId[{0}]不是一个有效的uuid http\ timeout = HTTP 超时 failed\ to\ %s\ to\ %s\:\ IO\ Error = @@ -910,6 +1185,7 @@ host\ %s\ is\ not\ exists = 物理机{0}不存在 Shell\ fail,\ because\ %s = Shell失败,原因是{0} add\ integrity\ file[%s.%s]\ fail,\ because\ %s = 添加完整性文件[{0}.{1}]失败,原因是{2} unsupported\ operation\ for\ EncryptColumnIntegrityFactory = 不支持对EncryptColumnIntegrityFactory的操作 +the\ shared\ mount\ point\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters = invalid\ certificate\ parameter\ \:\ %s\=%s = originText\ or\ certificateText\ can\ not\ be\ null = 原始文本或证书文本不能为空 the\ security\ machine\ [%s]\ does\ not\ exist = 安全计算机[{0}]不存在 @@ -938,6 +1214,7 @@ cannot\ find\ SecurityMachine[uuid\:%s],\ it\ may\ have\ been\ deleted = there\ has\ been\ a\ security\ machine\ having\ managementIp[%s] = 已存在具有ManagementIP[{0}]的安全计算机 there\ is\ no\ security\ machine\ that\ can\ be\ activated = 没有可以激活的安全机器。 invalid\ token\ type\ %s,\ only\ supports\ %s. = 令牌类型{0}无效,仅支持{1}。 +the\ identity\ authentication\ function\ is\ enabled\ but\ the\ corresponding\ resource\ pool\ is\ not\ set,\ please\ re-enable\ the\ function\ and\ try\ again = cannot\ delete\ the\ resource\ pool\ %s\ when\ in\ use = 无法删除正在使用的资源池{0} cannot\ find\ SecretResourcePool[uuid\:%s],\ it\ may\ have\ been\ deleted = failed\ to\ connect\ client = @@ -980,6 +1257,7 @@ no\ aliyun\ account\ found\ for\ accountUuid\:\ %s = 找不到当前账户{0}对 # In Module: directory resources\ %s\ has\ already\ been\ bound\ to\ directory\ uuid[%s]\ ,\ multiple\ paths\ are\ not\ supported = 资源{0}已绑定到目录uuid[{1}],不支持多个路径 resource\ types\ %s\ are\ not\ supported\ by\ directory,\ allowed\ types\ are\ %s = 目录不支持资源类型{0},允许的类型为{1} +name\ contains\ unsupported\ characters,\ name\ can\ only\ contain\ Chinese\ characters,\ English\ letters,\ numbers,\ spaces,\ and\ the\ following\ characters\:\ ()()【】@._-+\ = circular\ dependency\ detected,\ directory\ %s\ and\ directory\ %s\ will\ cause\ circular\ dependency = 
检测到循环依赖,目录{0}和目录{1}将导致循环依赖 unable\ to\ find\ directory[uuid\=%s] = duplicate\ directory\ name,\ directory[uuid\:\ %s]\ with\ name\ %s\ already\ exists = 已存在名称为{1}的重复目录名、目录[uuid:{0}] @@ -1025,6 +1303,7 @@ Ip\ address\ [uuid\:%s]\ is\ not\ belonged\ to\ nic\ [uuid\:%s] = IP地址[uuid: eip[uuid\:%s]\ has\ not\ attached\ to\ any\ vm\ nic = eip[uuid:{0}]还没有被挂载到任意虚拟机网卡 vip\ ipVersion\ [%d]\ is\ different\ from\ guestIp\ ipVersion\ [%d]. = 虚拟IP的协议号[{0}]和网卡的IP协议号[{1}]不同 Vip[%s]\ is\ in\ the\ guest\ ip\ range\ [%s,\ %s] = 虚拟IP[{0}]和网卡的IP不能在相同地址段[{1}-{2}] +the\ vm[uuid\:%s]\ that\ the\ EIP\ is\ about\ to\ attach\ is\ already\ on\ the\ public\ network[uuid\:%s]\ from\ which\ the\ vip[uuid\:%s,\ name\:%s,\ ip\:%s]\ comes = vip[uuid\:%s]\ has\ been\ occupied\ other\ network\ service\ entity[%s] = vip[uuid:{0}]已经被其他网络服务实体[{1}]占用 eip\ can\ not\ be\ created\ on\ system\ vip = 无法在系统VIP上创建EIP vip[uuid\:%s]\ is\ not\ in\ state[%s],\ current\ state\ is\ %s = vip[uuid:{0}]不处于状态[{1}]中,当前状态[{2}] @@ -1033,6 +1312,7 @@ vm\ state[%s]\ is\ not\ allowed\ to\ operate\ eip,\ maybe\ you\ should\ wait\ th vmNic\ uuid[%s]\ is\ not\ allowed\ add\ eip,\ because\ vmNic\ exist\ portForwarding\ with\ allowedCidr\ rule = 不允许vmnic uuid[{0}]添加EIP,因为vmnic存在具有AllowedCIDR规则的端口转发 cannot\ find\ Eip\ guest\ ip\:\ %s\ in\ vmNic\ ips\ \:%s = 在vmnic IP{1}中找不到EIP来宾IP{0} eip\ [uuid\:%s]\ is\ deleted = 已删除EIP[uuid:{0}] +unable\ to\ attach\ the\ L3\ network[uuid\:%s,\ name\:%s]\ to\ the\ vm[uuid\:%s,\ name\:%s],\ because\ the\ L3\ network\ is\ providing\ EIP\ to\ one\ of\ the\ vm's\ nic = # In Module: expon expon\ request\ failed,\ code\ %s,\ message\:\ %s. 
= @@ -1067,11 +1347,16 @@ unknown\ value\ type\ %s,\ key\ \=\ %s = 未知的值类型{0},键={1} failed\ to\ HTTP\ call\ all\ prometheus\ instances = 无法对所有Prometheus实例进行HTTP调用 # In Module: faulttolerance +pvm[uuid\:%s]\ and\ svm[uuid\:%s]\ volume\ number\ not\ matches,\ do\ not\ allowed\ to\ start = +volume\ with\ index\:\ %d,\ of\ pvm[uuid\:%s]\ and\ svm[uuid\:%s]\ have\ different\ size,\ do\ not\ allowed\ to\ start = +volume\ with\ index\:\ %d,\ of\ pvm[uuid\:%s]\ and\ svm[uuid\:%s]'s\ cache\ volume\ have\ different\ size,\ do\ not\ allowed\ to\ start = image[uuid\:%s]\ is\ still\ used\ by\ fault\ tolerance\ vm[uuid\:%s] = 容错虚拟机[uuid:{1}]仍在使用镜像[uuid:{0}] +could\ not\ delete\ l3\ network[uuid\:%s].\ Fault\ tolerance\ vm[%s]\ in\ states[%s,\ %s]\ still\ using\ it.\ Stop\ related\ fault\ tolerance\ vms\ before\ delete\ l3\ network = Can\ not\ fail-over\ vm[uuid\:%s],\ please\ enable\ ft\ in\ GlobalConfig = 无法对VM[uuid:{0}]进行故障转移,请在GlobalConfig中启用FT Can\ not\ fail-over\ vm[uuid\:%s],\ please\ confirm\ it\ is\ a\ fault\ tolerance\ vm\ group = 无法对VM[uuid:{0}]进行故障转移,请确认它是容错VM组 Can\ not\ fail-over\ vm[uuid\:%s],\ because\ fault\ tolerance\ vm\ group\ is\ not\ in\ status\ of\ [%s,\ %s] = 无法对VM[uuid:{0}]进行故障转移,因为容错VM组的状态不是[{1},{2}] Can\ not\ maintain\ host,\ because\ ft\ vms[%s]\ are\ under\ recovering = 无法维护主机,因为正在恢复FT VM[{0}] +current\ operation[api\:%s]\ is\ not\ supported\ when\ ft\ vm[uuid\:%s,\ state\:%s]\ is\ not\ stopped = Can\ not\ set\ vm\ level\ to\ %s,\ please\ enable\ ft\ in\ GlobalConfig = 无法将VM级别设置为{0},请在GlobalConfig中启用FT can\ not\ update\ ft\ vm[uuid\:%s]\ cpu\ number,\ need\ to\ stop\ both\ of\ the\ vms = 无法更新FT VM[uuid:{0}]CPU编号,需要停止两个VM can\ not\ update\ ft\ vm[uuid\:%s]\ memory\ size,\ need\ to\ stop\ both\ of\ the\ vms = 无法更新FT VM[uuid:{0}]内存大小,需要停止两个VM @@ -1093,6 +1378,7 @@ can\ not\ start\ secondary\ vm,\ because\ primary\ vm\ is\ still\ stopped = 无 Can\ not\ migrate\ ft\ secondary\ vm[uuid\:%s] = 无法迁移FT辅助虚拟机[uuid:{0}] Can\ not\ migrate\ ft\ primary\ vm[uuid\:%s] = 
无法迁移FT主虚拟机[uuid:{0}] Current\ ft\ vm\ is\ in\ unknown\ status,\ can\ not\ stop\ it,\ please\ try\ to\ fail-over\ it\ manually = 当前FT虚拟机处于未知状态,无法停止,请尝试手动故障转移 +unable\ to\ start\ the\ vm[uuid\:%s].\ It\ doesn't\ have\ any\ nic,\ please\ attach\ a\ nic\ and\ try\ again = an\ other\ fault\ tolerance\ gc\ task\ is\ running,\ cancel\ the\ new\ task\ and\ wait\ return = 其他容错GC任务正在运行,请取消新任务并等待返回 can\ not\ create\ secondary\ vm,\ because\ primary\ vm\ is\ stopped = 无法创建辅助虚拟机,因为主虚拟机已停止 created\ svm\ found,\ report\ error\ for\ this\ start\ secondary\ vm\ request = 找到已创建的SVM,报告此启动辅助虚拟机请求的错误 @@ -1101,6 +1387,7 @@ pvm[uuid\:%s]\ not\ exists = PVM[uuid:{0}]不存在 could\ not\ failover.\ Primary\ vm\ is\ unknown\ but\ no\ fault\ tolerance\ network\ address\ available = 无法进行故障转移。主云主机未知,但没有可用的容错网络地址 could\ not\ failover.\ Secondary\ vm\ is\ unknown\ but\ no\ fault\ tolerance\ network\ address\ available = 无法进行故障转移。辅助云主机未知,但没有可用的容错网络地址 unexpected\ exception = 意外异常 +cannot\ found\ available\ ip\ from\ current\ ft\ network.\ Check\ whether\ global\ config[category\:ft\ name\:fault.tolerance.network.cidr]\ is\ correctly\ set,\ and\ confirm\ that\ host[uuid\:%s]\ own\ ip\ address\ in\ the\ CIDR = can\ not\ start\ secondary\ vm,\ because\ primary\ vm\ is\ stopped = 无法启动辅助云主机,因为主云主机已停止 not\ fault\ tolerance\ vm\ port\ found = 未找到容错VM端口 failed\ to\ allocate\ port\ of\ nic[uuid\:\ %s]\ on\ host[uuid\:\ %s] = 无法分配主机[uuid:{1}]上的NIC[uuid:{0}]的端口 @@ -1123,6 +1410,7 @@ DHCP\ server\ ip\ [%s]\ is\ not\ a\ IPv6\ address = DHCP服务器地址[{0}]不 DHCP\ server\ ip\ [%s]\ is\ already\ existed\ in\ l3\ network\ [%s] = 三层网络[{1}]已经配置了DHCP服务器地址[{0}] DHCP\ server\ ip\ [%s]\ can\ not\ be\ equaled\ to\ gateway\ ip = DHCP服务器地址[{0}]不能等于网关地址 DHCP\ server\ ip\ [%s]\ can\ not\ be\ configured\ to\ system\ l3 = 系统网络不能配置DHCP服务器地址[{0}] +could\ not\ delete\ ip\ address,\ because\ ip\ [%s]\ is\ dhcp\ server\ ip = could\ not\ set\ dhcp\ v4\ server\ ip,\ because\ there\ is\ no\ ipv4\ range = could\ not\ set\ dhcp\ v4\ server\ ip,\ 
because\ ip[%s]\ is\ not\ the\ cidr\ of\ l3\ [%s] = could\ not\ set\ dhcp\ v6\ server\ ip,\ because\ there\ is\ no\ ipv6\ range = @@ -1140,6 +1428,7 @@ failed\ to\ allocate\ DHCP\ server\ IP\ for\ L3\ network[uuid\:%s] = cannot\ find\ bridge\ name\ for\ L3\ network[%s] = could\ not\ attach\ eip\ because\ there\ is\ no\ gateway\ for\ nic[uuid\:%s] = could\ not\ attach\ eip\ because\ ipv6\ eip\ can\ ONLY\ be\ attached\ to\ flat\ network = 无法附加EIP,因为IPv6 EIP只能附加到三层网络 +L2Network\ where\ vip's\ L3Network\ based\ hasn't\ attached\ the\ cluster\ where\ vmNic[uuid\:%s]\ located = can\ not\ bound\ more\ than\ 1\ %s\ eip\ to\ a\ vm\ nic[uuid\:%s]\ of\ flat\ = 无法将1个以上的{0}EIP绑定到平面的VM NIC[uuid:{1}] unable\ to\ apply\ the\ EIP\ operation\ for\ the\ the\ vm[uuid\:%s,\ state\:%s],\ because\ cannot\ find\ the\ VM's\ hostUUid = 无法为虚拟机[uuid:{0}, state:{1}]应用EIP操作,因为无法找到该虚拟机的主机uuid(hostUuid) host[uuid\:%s]\ is\ not\ connected = 主机[uuid:{0}]未连接 @@ -1202,7 +1491,7 @@ failed\ to\ download\ guest\ tools\ iso\ because\ no\ kvm\ host[uuid\:%s]\ found no\ available\ cdrom\ device\ for\ vm[uuid\:%s]\ to\ attach\ guest-tools = failed\ to\ attach\ guest\ tools\ iso\ to\ vm[uuid\:%s],\ because\:%s = 无法为虚拟机[uuid:{0}]挂载增强工具镜像,因为:{1} failed\ to\ detach\ guest\ tools\ iso\ from\ vm[uuid\:%s],\ because\:%s = 无法从VM[uuid:{0}]分离来宾工具ISO,因为:{1} -failed\ to\ get\ guest\ tools\ state\ from\ prometheus\:\ [metric\=%s] = 无法从 Prometheus 获取 metric={0} 的 VM-Tools 状态 +failed\ to\ get\ guest\ tools\ state\ from\ prometheus\:\ [metric\=%s] = 无法从 Prometheus 获取 metric 等于 {0} 的 VM-Tools 状态 can\ not\ be\ here = 不能在这里。 # In Module: header @@ -1220,8 +1509,6 @@ Incorrect\ %s\ settings,\ valid\ value\ is\ %s = 不正确的设置{0},有效 cannot\ connect\ to\ [%s]\ in\ %d\ milliseconds,\ so\ aliyun\ openapi\ is\ unreachable. = 无法在{1}毫秒内连接到[{0}],因此无法访问阿里云OpenAPI。 [%s,\ %s]\ not\ a\ valid\ ak\ pair,\ please\ check\ it.\ more\ details\:\ %s = no\ bucket\ found\ for\ backup = 没有可用的Bucket执行备份 -accessKey\ and\ keySecret\ must\ be\ set! 
= 必须设置AccessKey和KeySecret! -regionId\ must\ be\ set! = 必须设置RegionID! cannot\ find\ key\ /\ secret\ from\ msg = 无法从消息中找到密钥/机密 no\ such\ instance\ type\ support\:\ %s = couldn't\ find\ router\ table\ in\ router\:\ [%s] = @@ -1240,6 +1527,7 @@ ecs\ image\ existed\ remote,\ name\:\ %s,\ created\ time\:\ %s = 云主机镜像 no\ such\ instance-offering\ uuid = mem\ must\ \\\\>\ 1G,\ and\ mem\ GB\ must\ \\\\>\=\ cpu = No\ Available\ instance\ types\ now. = 没有可用的实例类型 +This\ region\ [%s]\ cannot\ produce\ instance\ type\ [%s]\ now,\ please\ select\ another\ instance\ type\ or\ another\ region = no\ system\ disk\ found\ for\ ecs\:\ [%s],\ ecs\ id\ is\:\ [%s] = 没有系统云盘可用来创建云主机,云主机id是: [{1}] Only\ delete\ ecs\ which\ status\ is\ running\ or\ stopped,\ now\ is\ %s = 只能删除状态为运行中或者已停止的云主机,现在云主机状态为{0} Only\ postpaid\ ecs\ support\ delete\ remote,\ the\ indicate\ ecs\ charge\ type\ is\:\ %s = 只有已付费的云主机支持删除,目前云主机付费状态是: {0} @@ -1263,14 +1551,19 @@ Only\ support\ ImageStoreBackupStorage = 用本地镜像创建阿里云上的镜 image\ name\ cannot\ starts\ with\ http\://\ or\ https\:// = 镜像名称不可以以http://或https://开始 no\ backup\ storage\ found\ for\ imageUuid\:\ %s = 未找到uuid为{0}的镜像服务器 exceeded\ backup\ storage\ found\ for\ the\ imageUuid\:\ %s,\ please\ indicate\ it\ manually = 发现多个存在镜像uuid为{0}的镜像服务器,请尝试指定镜像服务器 +valid\ platform\:[%s]\ for\ aliyun\ image\ import,\ valid\ value\ are\:\ [%s] = image\ [%s]\ is\ not\ enable\ now = 镜像[{0}]不可用 the\ indicated\ image\ [%s]\ is\ importing\ to\ datacenter\ [%s]\ now... = 指定的镜像[{0}]正在被导入到数据中心[{1}]中... ecs\ instance[%s]\ isn't\ existed,\ please\ check\ it. 
= 云主机[{0}]不存在,请进行核查 +Only\ ecs\ instances\ that\ are\ in\ the\ running\ and\ stopped\ status\ can\ detach\ the\ eip\ ,\ but\ the\ ecs\ [%s]\ status\ is\ [%s]\ now\ = virtual\ border\:\ %s\ has\ been\ deleted = 边界路由器: {0}已经被删除 couldn't\ find\ such\ router\ interface\:\ [%s] = destination\ cidr\ [%s]\ is\ existed\ and\ point\ to\ another\ instance-id\ [%s],\ please\ check\ or\ delete\ it\ first = 目标CIDR[{0}]已经存在且指向其他云主机[{1}],请检查或删除它 couldn't\ find\ such\ vr\ entry\:\ [%s] = +Only\ esc\ instances\ that\ are\ in\ the\ running\ and\ stopped\ status\ can\ attach\ the\ eip\ ,\ but\ the\ ecs\ [%s]\ status\ is\ [%s]\ now\ = Vbr\:\ [%s]\ is\ in\ create\ connection\ progress,\ please\ wait... = 虚拟边界路由器: [{0}]正在创建连接中,请稍后... +custom\ cidr\ [%s]\ is\ already\ existed\ in\ vbr\ [%s],\ it\ is\ overlapped\ with\ target\ cidr\ [%s],\ please\ check\ and\ delete\ it\ first. = +custom\ cidr\ [%s]\ is\ already\ existed\ in\ vrouter\ [%s],\ it\ is\ overlapped\ with\ target\ cidr\ [%s],\ please\ check\ and\ delete\ it\ first. = No\ Such\ VRouter\ nic\ found\ for\ l3network\:\ %s = 未找到三层网络{0}对应的虚拟路由器网卡 No\ Such\ Cidr\ found\ for\ l3network\:\ %s = 未找到三层网络{0}对应的CIDR No\ Such\ Ecs\ VPC\ found\:\ %s = @@ -1293,6 +1586,8 @@ OssBucket[%s]\ is\ not\ attached. = oss Bucket[{0}]没有被添加 domain,\ key,\ secret\ must\ be\ set\ all = 域、密钥、机密必须全部设置 oss\ bucket\ is\ not\ empty! 
= oss Bucket不为空 Root\ volume\ cannot\ be\ deleted = 云盘不能被删除 +Cannot\ set\ the\ disk's\ deleteWithInstance\ property\ to\ false\ when\ the\ category\ property\ of\ the\ disk\ is\ ephemeral = +Cannot\ set\ the\ disk's\ deleteWithInstance\ property\ to\ false\ when\ the\ category\ property\ of\ the\ disk\ is\ cloud\ and\ portable\ property\ is\ false = The\ disk\ [%s]\ is\ not\ attach\ on\ any\ instance\ = 该云盘[{0}]没有加载到任何云主机 Only\ data\ disk\ can\ be\ mounted\ on\ ecs = 只有云盘可以挂装到云主机上 The\ disk\ not\ be\ attach\ on\ any\ ecs = 该云盘没有加载到任何云主机 @@ -1302,6 +1597,7 @@ The\ disk\ [%s]\ is\ already\ mounted\ on\ the\ instance\ [%s] = 云盘[{0}]已 Only\ data\ disk\ can\ attach\ to\ ecs = 只有云盘能加载到云服务器 Cannot\ attach\ disk\ when\ in\ use = 不能加载正在使用的云盘 Non-independent\ disk\ can\ only\ be\ destroyed\ with\ instances = 未独立的云盘只能和云主机一起删除 +The\ size\ and\ snapshot\ id\ in\ the\ request\ parameter\ must\ select\ one\ of\ the\ items\ to\ specify\ the\ size\ of\ the\ disk\ or\ create\ a\ disk\ using\ the\ snapshot. = Not\ allowed\ create\ disk\ on\ root\ volume\ snapshot = 不允许在云盘快照上创建云盘 the\ disk\ name\ or\ description\ cannot\ set\ start\ with\ 'http\://'\ or\ 'https\://'\ = 云盘名称和介绍不能以http://或https://开头 The\ operation\ allows\ only\ when\ ecs\ state\ of\ the\ ecs\ instance\ status\ be\ running\ or\ stopped = 当云主机态为运行中或已停止时该操作才被允许 @@ -1315,7 +1611,6 @@ couldn't\ find\ such\ datacenter\:\ [%s] = couldn't\ find\ such\ identityzone\:\ [%s] = couldn't\ find\ such\ vpc\:\ [%s] = couldn't\ find\ such\ vswitch\:\ [%s] = -couldn't\ find\ such\ oss\ bucket\:\ [%s] = couldn't\ find\ such\ virtual\ border\ router\:\ [%s] = couldn't\ find\ such\ virtual\ router\:\ [%s] = non\ support\ virtual\ router\ type\:\ [%s] = @@ -1326,7 +1621,6 @@ couldn't\ find\ such\ virtual\ router\:\ %s = non\ supported\ virtual\ router\ type\:\ %s = couldn't\ find\ such\ virtual\ router\ from\ vpcUuid\:\ %s = no\ current\ used\ key/secret\ for\ aliyun! = -no\ current\ used\ key/secret\ for\ %s! 
= dcType\ not\ supported\ type\ [%s] = DCType不支持类型[{0}] regionId\ [%s]\ already\ created\ by\ ak\ [%s] = 数据中心ID[{0}]已经被AccessKey[{1}]创建 DataCenter\ [%s]\ is\ still\ in\ sync\ progress,\ please\ wait. = 数据中心[{0}]仍在同步进程中,请稍后 @@ -1351,6 +1645,7 @@ next\ hop\ type\ [%s]\ not\ supported\ create\ route\ entry\ now! = 不支持下 virtual\ border\ router\ only\ support\ routerinterface\ as\ next\ hop\ type = 作为下一跳类型,虚拟边界路由只支持路由接口 vswitch's\ cidr\ [%s]\ not\ in\ the\ vpc's\ [%s] = 虚拟交换机的CIDR没有在VPC[{1}]中 cidr\ is\ overlap\ by\ another\ vswitch\:\ %s = CIDR和其他的虚拟交换机{0}有重叠 +invalid\ CidrBlock\:\ %s,\ which\ must\ subnet\ in\ '10.0.0.0/8',\ '172.16.0.0/12',\ '192.168.0.0/16' = no\ such\ virtual\ border\ router\:\ %s = 没有这个虚拟边界路由器: {0} no\ such\ virtual\ router\:\ %s = 没有这个的虚拟路由: {0} localGateway\ is\ not\ IPv4\:\ %s = 本地网关地址不是IPV4: {0} @@ -1369,6 +1664,8 @@ remoteCidr\ must\ be\ Cidr! = 远程CIDR必须是CIDR localCidr\ and\ remoteCidr\ must\ be\ Cidr! = 本地CIDR和远程CIDR必须是CIDR vpngateway\ [%s]\ existed,\ cannot\ delete\ remote = VPN网关[{0}]已经存在,不能删除远程的 +# In Module: i18n-tools + # In Module: iam1 AccountGroup[uuid\:%s,\ name\:%s]\ has\ been\ deleted = 账号 {0}(名称为 {1})已经被删除 failed\ to\ move\ account\ group[uuid\:%s]\ to\ it\ self = 不允许将账户组 {0} 移动到它自己下 @@ -1396,6 +1693,7 @@ organization[%s]\ is\ repeated.\ = 部门[{0}]出现重复 project[%s]\ is\ not\ exist.\ = 项目[{0}]不存在 fail\ to\ build\ VirtualID\ info\ from\ file.\ = 不能解析文件内容 virtualID[uuid\:%s]\ not\ in\ project[uuid\:%s] = VirtualID[uuid:{0}]不在项目[uuid:{1}]中 +Can\ not\ do\ operations,\ because\ current\ organization[uuid\:%s]\ is\ staled,\ please\ enable\ it = organization[uuid\:%s]\ is\ parent\ of\ the\ organization[uuid\:%s],\ cannot\ set\ it\ as\ a\ child\ organization = 部门[uuid:{0}]是部门[uuid:{1}]的上级部门,无法被设置为子部门 the\ project[uuid\:\ %s,\ name\:%s]\ is\ in\ state\ of\ %s\ which\ disallows\ the\ operation[%s] = 项目[[uuid: {0}, 名称:{1}]]是{2}状态,不允许执行[{3}]操作 can\ not\ parse\ the\ cron\ expression = 无法分析Cron表达式 @@ -1405,6 +1703,7 @@ wrong\ virtual\ 
ID[uuid\:%s],\ not\ existing\ or\ wrong\ password = 错误的vir virtual\ ID[name\:%s]\ is\ disabled = virtual ID[名称:{0}]不可用 virtual\ ID[name\:%s]\ not\ belonging\ to\ the\ project[name\:%s] = virtual ID[名称:{0}]不属于项目[name:{1}] the\ quota[name\:%s]\ of\ Account[uuid\:%s]\ can\ not\ be\ %d,\ otherwise\ it\ will\ exceeds\ the\ quota\ of\ organization[uuid\:%s] = 帐户[uuid:{1}]的配额[名称:{0}]不能为{2},否则将超过组织[uuid:{3}]的配额 +Can\ not\ do\ operations,\ because\ Current\ virtualID[uuid\:%s]\ is\ staled,\ please\ enable\ it = only\ admin\ and\ the\ virtual\ ID\ itself\ can\ do\ the\ update = 只有admin和virtual ID本身可以执行更新操作 old\ password\ is\ not\ equal\ to\ the\ original\ password,\ cannot\ update\ the\ password\ of\ virtual\ ID[uuid\:%s] = 旧密码不等于原始密码,无法更新虚拟ID[uuid:{0}]的密码 attribute\ name\ cannot\ be\ null,\ value[%s] = 属性不能为null,输入值[{0}] @@ -1424,6 +1723,7 @@ retire\ policy\ must\ be\ deleted\ before\ pull\ the\ project\ out\ of\ Retired\ login\ is\ prohibited\ because\ the\ project\ is\ in\ state\ of\ %s = 禁止登录,因为项目处于{0}状态 no\ quota[name\:%s]\ found = 未找到配额[名称:{0}] organization[uuid\:%s]\ is\ a\ Company\ that\ cannot\ have\ parent\ organization = 组织[uuid:{0}]是不能有上级组织的公司 +parent\ organization[uuid\:%s]\ cannot\ be\ a\ child\ organization[uuid\:%s]\ of\ a\ childOrganization = duplicate\ virtualID\ name[%s] = 重复的用户名[{0}] duplicate\ project\ name[%s] = 重复的项目名[{0}] invalid\ project\ name[%s],\ an\ account\ or\ project\ with\ the\ same\ name\ exists = 无效的项目名[{0}],已有账户或项目使用了相同的名称 @@ -1449,6 +1749,8 @@ invalid\ time[%s],\ it\ should\ be\ in\ format\ of\ for\ example\ 10m,\ 1h,\ 2d invalid\ spending\ value[%s],\ spending\ value\ should\ between\ 0\ and\ %f = 无效的费用[{0}], 费用范围应该在0到{1}之间 invalid\ spending\ value[%s],\ it\ should\ be\ in\ format\ of\ for\ example\ 10.001 = 无效的费用[{0}], 费用格式应该符合例如:10.001 invalid\ date\ or\ time[%s],\ it\ cannot\ be\ before\ current\ time[%s] = 无效的日期或时间,回收时间不能在当前时间之前[{1}] +virtual\ ID[uuid\:%s]\ already\ has\ admin\ related\ attributes,\ can\ not\ add\ %s = +organiztion\ 
ID[uuid\:%s]\ already\ has\ opoeration\ attributes,\ can\ not\ add\ %s = virtual\ id[uuid\:%s]\ already\ has\ a\ project\ operator\ attribute = 虚拟ID[uuid:{0}]已具有项目运算符属性 cannot\ find\ zone[uuid\:%s] = 找不到数据中心[uuid:{0}] project[uuid\:%s]\ already\ has\ a\ project\ admin = 项目[uuid:{0}]已经设置过项目管理员了 @@ -1464,6 +1766,7 @@ failed\ to\ login\:\ account\ is\ disabled = 无法登录: 账号被禁用 wrong\ account\ name\ or\ password = 错误的账号名称或密码 cannot\ find\ the\ resource[uuid\:%s];\ wrong\ resourceUuid\ or\ the\ resource\ is\ admin\ resource = 无法找到资源[uuid:{0}]: 错误的资源uuid或者资源是管理员资源 unable\ to\ find\ account[uuid\=%s] = 无法找到账号 {0} +Invalid\ ChangeResourceOwner\ operation.Original\ owner\ is\ the\ same\ as\ target\ owner.Current\ account\ is\ [uuid\:\ %s].The\ resource\ target\ owner\ account[uuid\:\ %s].The\ resource\ original\ owner\ account[uuid\:%s]. = 无效的 ChangeResourceOwner 操作。原始拥有者与目标拥有者相同。当前账户是[uuid: {0}],资源目标拥有者账户[uuid: {1}],资源原始拥有者账户[uuid:{2}] cannot\ find\ the\ account[uuid\:%s] = 找不到账户[uuid:{0}] unable\ to\ create\ an\ account.\ An\ account\ already\ called\ %s = 不能创建账户,“{0}”已经被使用 account\ cannot\ delete\ itself = 账户不能删除自己 @@ -1485,7 +1788,9 @@ the\ account[uuid\:%s]\ used\ [name\:%s,\ usedValue\:%s]\ exceeds\ request\ quot session\ of\ message[%s]\ is\ null = 消息 {0} 的会话是空的 session\ uuid\ is\ null = session uuid 是空的 additional\ authentication\ required = 需要额外认证 +quota\ exceeding.The\ resource\ owner(or\ target\ resource\ owner)\ account[uuid\:\ %s\ name\:\ %s]\ exceeds\ a\ quota[name\:\ %s,\ value\:\ %s],\ Current\ used\:%s,\ Request\:%s.\ Please\ contact\ the\ administrator. = 配额已超出。资源拥有者(或目标资源拥有者)帐户[uuid:{0},名称:{1}]超过了配额[名称:{2},值:{3}],当前使用:{4},请求:{5}。请与管理员联系。 quota\ exceeding.\ The\ account[uuid\:\ %s]\ exceeds\ a\ quota[name\:\ %s,\ value\:\ %s].\ Please\ contact\ the\ administrator. = +quota\ exceeding.\ The\ account[uuid\:\ %s]\ exceeds\ a\ quota[name\:\ %s,\ value\:\ %s],\ Current\ used\:%s,\ Request\:%s.\ Please\ contact\ the\ administrator. 
= 配额已超出。帐户[uuid:{0}]超过了配额[名称:{1},值:{2}],当前使用:{3},请求:{4}。请与管理员联系。 Login\ sessions\ hit\ limit\ of\ max\ allowed\ concurrent\ login\ sessions = Session\ expired = unsupported\ login\ type\ %s = 不支持的登录类型{0} @@ -1505,6 +1810,7 @@ account[uuid\:%s]\ has\ no\ access\ to\ resources\ with\ owner-only\ scope\:\ %s # In Module: image Failed\ because\ management\ node\ restarted. = 失败,因为管理节点已重新启动。 +the\ backup\ storage[uuid\:%s]\ is\ not\ in\ status\ of\ Connected,\ current\ status\ is\ %s = 镜像服务器[uuid:{0}]未处于Connected状态,当前状态是{1} The\ aarch64\ architecture\ does\ not\ support\ legacy. = AARCH64体系结构不支持旧版。 volume[uuid\:%s]\ is\ not\ Ready,\ it's\ %s = 云盘[uuid:{0}]未Ready,它现在为{1} volume[uuid\:%s]\ is\ not\ Enabled,\ it's\ %s = 云盘[uuid:{0}]未Enabled,它现在为{1} @@ -1529,7 +1835,6 @@ the\ image[uuid\:%s,\ name\:%s]\ is\ not\ deleted\ on\ the\ backup\ storage[uuid Cannot\ find\ image[uuid\:%s],\ it\ may\ have\ been\ deleted = Failed\ to\ download\ image[name\:%s]\ on\ all\ backup\ storage%s. = unable\ to\ allocate\ backup\ storage\ specified\ by\ uuids%s,\ list\ errors\ are\:\ %s = 不能根据[uuids:{0}]分配镜像服务器,错误清单为: {1} -image\ [uuid\:%s]\ has\ been\ deleted = 镜像[uuid:{0}]已经被删除 failed\ to\ create\ image\ from\ root\ volume[uuid\:%s]\ on\ all\ backup\ storage,\ see\ cause\ for\ one\ of\ errors = 在所有镜像服务器上从云盘[uuid:{0}]创建镜像失败,查看错误原因 cannot\ find\ proper\ backup\ storage = 找不到适当的备份存储 failed\ to\ allocate\ all\ backup\ storage[uuid\:%s],\ a\ list\ of\ error\:\ %s = 镜像服务器[uuid:{0}]分配失败,错误清单:{1} @@ -1575,11 +1880,17 @@ failed\ to\ increase\ vm\ cpu,\ error\ details\:\ %s = 无法增加VM CPU,错 unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:%d]\ to\ check\ DNS = 无法连接主机 [ip:{0}, 用户名:{1}, ssh端口:{2}] 做 DNS 检查,请检查用户名密码是否正确 the\ host[uuid\:%s,\ status\:%s]\ is\ not\ Connected = 主机[uuid:{0}, 状态:{1}]不是Connected状态 cannot\ do\ the\ operation\ on\ the\ KVM\ host = 无法在 KVM 主机上执行操作 +cannot\ do\ volume\ snapshot\ merge\ when\ vm[uuid\:%s]\ is\ in\ state\ of\ %s.\ The\ operation\ is\ only\ allowed\ when\ 
vm\ is\ Running\ or\ Stopped = 在虚拟机[uuid:{0}]处于状态[{1}]时无法进行快照合并 +live\ volume\ snapshot\ merge\ needs\ libvirt\ version\ greater\ than\ %s,\ current\ libvirt\ version\ is\ %s.\ Please\ stop\ vm\ and\ redo\ the\ operation\ or\ detach\ the\ volume\ if\ it's\ data\ volume = 快照合并需要libvirt版本大于[{0}],当前libvirt版本为[{1}],请停止虚拟机并重新执行操作,或者将数据卷从虚拟机中分离 vm[uuid\:%s]\ is\ not\ Running\ or\ Stopped,\ current\ state[%s] = 虚拟机[uuid:{0}]未处在Running或Stopped状态, 现在状态为[{1}] kvm\ host[uuid\:%s,\ name\:%s,\ ip\:%s]\ doesn't\ not\ support\ live\ snapshot.\ please\ stop\ vm[uuid\:%s]\ and\ try\ again = failed\ to\ migrate\ VM = 迁移虚拟机失败 +failed\ to\ update\ nic[vm\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],because\ %s = 无法更新主机[uuid:{1}, ip:{2}] 虚拟机[vm:{0}]的网卡: {3} +failed\ to\ attach\ nic[uuid\:%s,\ vm\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],because\ %s,\ please\ try\ again\ or\ delete\ device[%s]\ by\ yourself = 无法将网卡[uuid:{0}, vm:{1}] 添加到主机[uuid:{2}, ip:{3}],因为:{4},请重新尝试或者自行删除设备[{5}] +failed\ to\ attach\ nic[uuid\:%s,\ vm\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],because\ %s = 无法将网卡[uuid:{0}, vm:{1}] 添加到主机[uuid:{2}, ip:{3}],因为:{4} failed\ to\ detach\ data\ volume[uuid\:%s,\ installPath\:%s]\ from\ vm[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = 无法在KVM主机[uuid:{4}, ip:{5}]上为虚拟机[uuid:{2}, name:{3}]卸载云盘[uuid:{0}, installPath:{1}],因为: {6} In\ the\ hypervisorType[%s],\ attach\ volume\ is\ not\ allowed\ in\ the\ current\ vm\ instance\ state[%s].
= +failed\ to\ attach\ data\ volume[uuid\:%s,\ installPath\:%s]\ to\ vm[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = 无法在主机[uuid:{4}, ip:{5}]上挂载硬盘[uuid:{0}, installPath:{1}]到虚拟机[uuid:{2}, name:{3}],因为: {6} failed\ to\ destroy\ vm[uuid\:%s\ name\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = 无法在主机[uuid:{2}, ip:{3}]上删除虚拟机[uuid:{0} name:{1}],原因: {4} unable\ to\ destroy\ vm[uuid\:%s,\ \ name\:%s]\ on\ kvm\ host\ [uuid\:%s,\ ip\:%s],\ because\ %s = unable\ to\ destroy\ a\ vm = 无法删除虚拟机 @@ -1598,15 +1909,18 @@ host\ %s\ is\ not\ managed\ by\ current\ mn\ node = host\ %s\ is\ not\ connected,\ skip\ to\ restart\ kvmagent = running\ task\ exists\ on\ host\ %s = failed\ to\ get\ local\ running\ tasks\ in\ some\ MN = +detected\ abnormal\ status[host\ uuid\ change,\ expected\:\ %s\ but\:\ %s\ or\ agent\ version\ change,\ expected\:\ %s\ but\:\ %s]\ of\ kvmagent,it's\ mainly\ caused\ by\ kvmagent\ restarts\ behind\ zstack\ management\ server.\ Report\ this\ to\ ping\ task,\ it\ will\ issue\ a\ reconnect\ soon = 检测到 KVM 代理异常状态[主机uuid改变,期望值:{0},实际值:{1} 或代理版本改变,期望值:{2},实际值:{3}],这通常是因为 KVM 代理重启导致管理节点无法获取到 KVM 代理状态。请等待主机重新完成后重试 unable\ to\ connect\ to\ kvm\ host[uuid\:%s,\ ip\:%s,\ url\:%s],\ because\ %s = 连接主机[uuid:{0}, ip:{1},url:{2}]失败,因为:{3} host\ can\ not\ access\ any\ primary\ storage = 主机无法访问任何数据存储 -connection\ error\ for\ KVM\ host[uuid\:%s,\ ip\:%s] = 连接主机 {0} [ip={1}] 失败 +connection\ error\ for\ KVM\ host[uuid\:%s,\ ip\:%s] = 连接主机 {0} [ip:{1}] 失败 the\ host[%s]\ ssh\ port[%s]\ not\ open\ after\ %s\ seconds,\ connect\ timeout = 主机[{0}]SSH端口[{1}]在{2}秒后未打开,连接超时 host\ password\ has\ been\ changed.\ Please\ update\ host\ password\ in\ management\ node\ by\ UpdateKVMHostAction\ with\ host\ UUID[%s] = failed\ to\ connect\ host[UUID\=%s]\ with\ SSH\ password = failed\ to\ connect\ host[UUID\=%s]\ with\ private\ key = unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:\ %d,\ ]\ to\ do\ DNS\ check,\ please\ check\ if\ username/password\ is\ 
wrong;\ %s = 无法连接主机[ip:{0}, 用户名:{1}, ssh端口:{2} ]做DNS检查,请检查用户名密码是否正确;{3} failed\ to\ ping\ all\ DNS/IP\ in\ %s;\ please\ check\ /etc/resolv.conf\ to\ make\ sure\ your\ host\ is\ able\ to\ reach\ public\ internet = 在{0}中的所有DNS/IP都ping失败了,请检查 /etc/resolv.conf 来确保你的主机能连接到公网 +unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:%d]\ to\ check\ the\ management\ node\ connectivity,please\ check\ if\ username/password\ is\ wrong;\ %s = 无法连接主机[ip:{0}, 用户名:{1}, ssh端口:{2}] 做管理节点检查,请检查用户名密码是否正确;{3} +the\ KVM\ host[ip\:%s]\ cannot\ access\ the\ management\ node's\ callback\ url.\ It\ seems\ that\ the\ KVM\ host\ cannot\ reach\ the\ management\ IP[%s].\ %s\ %s = 主机[ip:{0}]无法访问管理节点的回调地址。似乎是主机的管理IP无法访问,{2} {3} unable\ to\ check\ whether\ the\ host\ is\ taken\ over = 无法检查主机是否已被接管 unable\ to\ get\ the\ host\ takeover\ information = 无法读取主机的接管信息 the\ host[ip\:%s]\ has\ been\ taken\ over,\ because\ the\ takeover\ flag[HostUuid\:%s]\ already\ exists\ and\ utime[%d]\ has\ not\ exceeded\ host\ ping\ interval[%d] = 主机[IP:{0}]已被接管,因为接管标志[HOSTuuid:{1}]已存在,并且UTIME[{2}]未超过主机ping间隔[{3}] @@ -1637,6 +1951,7 @@ vm[uuid\:%s]\ crashes\ due\ to\ kernel\ error = VM[uuid:{0}]因内核错误而 host[uuid\:\ %s]\ memory\ ecc\ triggered,\ detail\:\ %s = 主机[uuid:{0}]内存ECC已触发,详细信息:{1} there\ are\ still\ hosts\ not\ have\ the\ same\ cpu\ model,\ details\:\ %s = 仍存在host有不同的cpu模型,详细信息:{0} pci\ bridge\ need\ a\ value\ greater\ than\ 0\ and\ lower\ than\ 32 = PCI桥需要大于0且小于32的值 +vm\ current\ state[%s],\ modify\ bus\ type\ requires\ the\ vm\ state[%s] = VM 当前状态[{0}],修改 BUS 类型需要VM状态[{1}] vm\ do\ not\ support\ having\ both\ SCSI\ and\ Virtio-SCSI\ bus\ type\ volumes\ simultaneously. 
= host[uuid\:%s]\ does\ not\ have\ cpu\ model\ information,\ you\ can\ reconnect\ the\ host\ to\ fix\ it = 主机[uuid:{0}]无cpu模型信息,你可以尝试重连来解决这个问题 failed\ to\ create\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = 在主机[uuid:{3}]上为二层网络[uuid:{1}, type:{2}]创建网桥[{0}]失败,原因: {4} @@ -1647,6 +1962,7 @@ failed\ to\ check\ bridge[%s]\ for\ l2VlanNetwork[uuid\:%s,\ name\:%s]\ on\ kvm\ failed\ to\ delete\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vlan\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = 无法在KVM主机[uuid:{4}]上删除二层网络[uuid:{1},类型:{2},VLAN:{3}]的网桥[{0}],因为{5} failed\ to\ apply\ rules\ of\ security\ group\ rules\ to\ kvm\ host[uuid\:%s],\ because\ %s = 不能应用安全组规则在主机t[uuid:{0}]上, 因为 {1} failed\ to\ check\ default\ rules\ of\ security\ group\ on\ kvm\ host[uuid\:%s],\ because\ %s = 在host[uuid:{0}]上检查默认安全组规则失败 +Failed\ to\ start\ vm,\ because\ can\ not\ disable\ vm.cpu.hypervisor.feature\ with\ vm.cpuMode\ none = 无法启动虚拟机:当 vm.cpuMode 为 none 时无法禁用 vm.cpu.hypervisor.feature cannot\ get\ vmUuid\ from\ msg\ %s = 无法从消息{0}获取VMuuid unable\ to\ do\ vm\ sync\ on\ host[uuid\:%s,\ ip\:%s]\ because\ %s = 不能在主机[uuid:{0}, ip:{1}]上执行虚拟机状态同步操作,因为{2} The\ vm[%s]\ state\ is\ in\ shutdown\ for\ a\ long\ time,\ check\ whether\ the\ vm\ is\ normal = 虚拟机[{0}]长时间处于关闭状态,请检查虚拟机是否正常 @@ -1666,7 +1982,6 @@ ldapServer[uuid\=%s,\ name\=%s]\ has\ been\ deleted = LDAP服务器 {0} (名 The\ LDAP\ server[url\=%s,base\=%s]\ already\ exists = LDAP 服务器[URL={0}, 起始DN={1}]已存在 all\ ldap\ account\ importing\ attempt\ is\ failed.\ ldapServerUuid\=%s = 所有导入第三方用户服务器 {0} 的尝试都失败了 all\ ldap\ account\ unbinding\ attempt\ is\ failed.\ ldapServerUuid\=%s = 所有解绑第三方用户服务器 {0} 的尝试都失败了 -not\ support = 不是支持 query\ ldap\ entry\ fail,\ %s = 查询LDAP条目失败,{0} query\ ldap\ entry[filter\=%s]\ fail,\ because\ %s = 查询 LDAP 条目 [筛选器:{0}] 失败,原因是 {1} user[%s]\ is\ not\ exists\ on\ LDAP/AD\ server[address\=%s,\ baseDN\=%s] = LDAP 服务器 [URL={1}, 基础DN={2}] 中用户 [DN={0}] 不存在 @@ -1756,6 +2071,7 @@ could\ not\ add\ 
backend\ server\ vmnic\ to\ serverGroup[uuid\:%s]\ ,because\ vm L3\ networks[uuids\:%s]\ of\ the\ vm\ nics\ has\ no\ network\ service[%s]\ enabled = 云主机网卡的三层网络没有可用的网络服务 the\ vm\ nics[uuid\:%s]\ are\ already\ on\ the\ load\ balancer\ servegroup\ [uuid\:%s] = VM NIC[uuid:{0}]已位于负载平衡器ServerGroup[uuid:{1}]上 could\ not\ add\ backend\ server\ vmnic\ to\ serverGroup\ [uuid\:%s],\ because\ vmnic\ ip\ [ipAddress\:%s]\ is\ repeated = 无法将后端服务器vmnic添加到ServerGroup[uuid:{0}],因为vmnic IP[IPAddress:{1}]重复 +could\ not\ add\ vm\ nic\ [uuid\:%s]\ to\ server\ group\ [uuid\:%s]\ because\ listener\ [uuid\:%s]\ attached\ this\ server\ group\ already\ the\ nic\ to\ be\ added = could\ not\ add\ backend\ server\ ip\ to\ serverGroup\ [uuid\:%s],\ because\ ip\ [ipAddress\:%s]\ is\ repeated = 无法将后端服务器IP添加到ServerGroup[uuid:{0}],因为IP[IPAddress:{1}]重复 invalid\ \ weight[serverIp\:%s,weight\:%s],\ weight\ is\ not\ in\ the\ range\ [%d,\ %d] = 权重[服务器IP:{0},权重:{1}]无效,权重不在范围[{2},{3}]内 the\ server\ ips\ [uuid\:%s]\ are\ already\ on\ the\ load\ balancer\ servegroup\ [uuid\:%s] = 服务器IP[uuid:{0}]已在负载平衡器ServerGroup[uuid:{1}]上 @@ -1783,6 +2099,7 @@ could\ not\ add\ backend\ server\ ip\ to\ serverGroup\ [uuid\:%s],\ because\ ip\ could\ not\ add\ server\ ip\ to\ share\ load\ balancer\ server\ group = 无法将服务器IP添加到共享负载平衡器服务器组 could\ not\ change\ backendserver,\ beacause\ vmincs\ and\ serverips\ is\ null = 无法更改后端服务器,因为VMINCS和ServerIPS为空 can\ not\ get\ service\ providerType\ for\ load\ balancer\ listener\ [uuid\:%s] = 无法获取负载平衡器侦听器[uuid:{0}]的Service ProviderType +service\ provider\ type\ mismatching.\ The\ load\ balancer[uuid\:%s]\ is\ provided\ by\ the\ service\ provider[type\:%s],\ but\ new\ service\ provider\ is\ [type\:\ %s] = there\ is\ listener\ with\ same\ port\ [%s]\ and\ same\ load\ balancer\ [uuid\:%s] = 存在具有相同端口[{0}]和相同负载平衡器[uuid:{1}]的侦听器 invalid\ health\ checking\ parameters[%s],\ the\ format\ is\ method\:URI\:code,\ for\ example,\ GET\:/index.html\:http_2xx = 无效的健康检查参数[{0}],正确格式:method:URI:code,例如 
GET:/index.html:http_2xx cannot\ find\ the\ load\ balancer[uuid\:%s] = 无法找到负载均衡器[uuid:{0}] @@ -1807,18 +2124,28 @@ invalid\ balancer\ weight\ for\ nic\:%s,\ %d\ is\ not\ in\ the\ range\ [%d,\ %d] # In Module: localstorage disk\ capacity[%s\ bytes]\ required = local\ storage\ volume[uuid\:%s]\ is\ not\ on\ this\ host = +To\ create\ volume\ on\ the\ local\ primary\ storage,\ you\ must\ specify\ the\ host\ that\ the\ volume\ is\ going\ to\ be\ created\ using\ the\ system\ tag\ [%s] = 创建本地主存储硬盘时,必须指定硬盘要创建的虚拟机,请使用系统标签[{0}]指定虚拟机 +invalid\ uri,\ correct\ example\ is\ file\://$URL;hostUuid\://$HOSTUUID\ or\ volume\://$VOLUMEUUID\ or\ volumeSnapshotReuse\://$SNAPSHOTUUID = URI格式错误,正确格式为file://$URL;hostUuid://$HOSTUUID 或 volume://$VOLUMEUUID 或 volumeSnapshotReuse://$SNAPSHOTUUID the\ volume[uuid\:%s]\ is\ not\ on\ any\ local\ primary\ storage = 云盘[uuid:{0}]不在任一本地主存储上 the\ volume[uuid\:%s]\ is\ already\ on\ the\ host[uuid\:%s] = 云盘[uuid:{0}]已经在主机[uuid:{1}]上 the\ primary\ storage[uuid\:%s]\ is\ not\ found = 主存储[uuid:{0}]未找到 the\ primary\ storage[uuid\:%s]\ is\ disabled\ or\ maintenance\ cold\ migrate\ is\ not\ allowed = 主存储[uuid:{0}]为Disabled或维护状态时不允许冷迁移 +the\ dest\ host[uuid\:%s]\ doesn't\ belong\ to\ the\ local\ primary\ storage[uuid\:%s]\ where\ the\ volume[uuid\:%s]\ locates = 目标主机[uuid:{0}]不属于本地主存储[uuid:{1}],未找到硬盘[uuid:{2}] +the\ dest\ host[uuid\:%s]\ doesn't\ have\ enough\ physical\ capacity\ due\ to\ the\ threshold\ of\ primary\ storage[uuid\:%s]\ is\ %f\ but\ available\ physical\ capacity\ is\ %d = 目标主机[uuid:{0}]没有足够的物理容量,因为主存储[uuid:{1}]的阈值是{2},可用物理容量是{3} the\ volume[uuid\:%s]\ is\ not\ in\ status\ of\ Ready,\ cannot\ migrate\ it = 云盘[uuid:{0}]的状态不是Ready,不能迁移 +the\ data\ volume[uuid\:%s,\ name\:\ %s]\ is\ still\ attached\ to\ the\ VM[uuid\:%s].\ Please\ detach\ it\ before\ migration = 数据云盘[uuid:{0}, name: {1}]还挂载在虚拟机[uuid:{2}]上,请先卸载该硬盘 +the\ volume[uuid\:%s]\ is\ the\ root\ volume\ of\ the\ vm[uuid\:%s].\ Currently\ the\ vm\ is\ in\ state\ of\ %s,\ please\ stop\ it\ 
before\ migration = 硬盘[uuid:{0}]是虚拟机[uuid:{1}]的根盘,虚拟机当前状态是{2},请先停止该虚拟机 +the\ volume[uuid\:%s]\ is\ the\ root\ volume\ of\ the\ vm[uuid\:%s].\ Currently\ the\ vm\ still\ has\ %s\ data\ volumes\ attached,\ please\ detach\ them\ before\ migration = 硬盘[uuid:{0}]是虚拟机[uuid:{1}]的根盘,虚拟机当前有{2}个数据盘挂载,请先卸载这些硬盘 +the\ volume[uuid\:%s]\ is\ the\ root\ volume\ of\ the\ vm[uuid\:%s].\ Currently\ the\ vm\ still\ has\ ISO\ attached,\ please\ detach\ it\ before\ migration = 硬盘[uuid:{0}]是虚拟机[uuid:{1}]的根盘,虚拟机当前有ISO挂载,迁移前请先卸载该ISO The\ clusterUuid\ of\ vm[uuid\:%s]\ cannot\ be\ null\ when\ migrate\ the\ root\ volume[uuid\:%s,\ name\:\ %s] = The\ two\ clusters[uuid\:%s,uuid\:%s]\ cannot\ access\ each\ other\ in\ l2\ network\ \ when\ migrate\ the\ vm[uuid\:%s]\ to\ another\ cluster = 两个集群[uuid:{0},uuid:{1}]无法在二层网络中互相访问对方,当迁移云主机[uuid:{2}]从其中一个集群到另一个集群时 the\ url[%s]\ is\ not\ an\ absolute\ path\ starting\ with\ '/' = url[{0}]不是一个以''/''开头的绝对路径 \ the\ url\ contains\ an\ invalid\ folder[/dev\ or\ /proc\ or\ /sys] = URL包含了一个无效的目录[/dev or /proc or /sys] The\ clusterUuid\ of\ vm\ cannot\ be\ null\ when\ migrate\ the\ vm = The\ primary\ storage[uuid\:%s]\ is\ disabled\ cold\ migrate\ is\ not\ allowed = 主存储[uuid:{0}]Disabled时不允许冷迁移 +volume[uuid\:%s]\ is\ not\ on\ the\ local\ storage\ anymore,it\ may\ have\ been\ deleted = 硬盘[uuid:{0}]不在本地存储上,可能已被删除 local\ primary\ storage[uuid\:%s]\ doesn't\ have\ the\ host[uuid\:%s] = +failed\ to\ download\ image[uuid\:%s]\ to\ all\ hosts\ in\ the\ local\ storage[uuid\:%s].\ %s = 尝试将镜像[uuid:{0}]下载到本地存储[uuid:{1}]的所有主机中失败。{2} unable\ to\ create\ the\ data\ volume[uuid\:\ %s]\ on\ a\ local\ primary\ storage[uuid\:%s],\ because\ the\ hostUuid\ is\ not\ specified.
= 不能在本地主存储[uuid:{1}]上创建云盘[uuid:{0}],因为物理机uuid没有指定 No\ Host\ state\ is\ Enabled,\ Please\ check\ the\ availability\ of\ the\ host = 未启用物理机状态,请检查物理机的可用性 host[uuid\:%s]\ cannot\ access\ local\ storage[uuid\:%s],\ maybe\ it\ is\ detached = 物理机[uuid:{0}]无法访问本地存储[uuid:{1}],它可能已分离 @@ -1826,7 +2153,9 @@ resource[uuid\:%s,\ type\:\ %s]\ is\ not\ on\ the\ local\ primary\ storage[uuid\ resource[uuid\:%s,\ type\:\ %s]\ on\ the\ local\ primary\ storage[uuid\:%s]\ maps\ to\ multiple\ hypervisor%s = 本地主存储[uuid:{2}]上的资源[uuid:{0},类型:{1}]映射到多个云主机管理程序{3} cannot\ attach\ ISO\ to\ a\ primary\ storage[uuid\:%s]\ which\ is\ disabled = 无法将ISO附加到已禁用的主存储[uuid:{0}] host(s)[uuids\:\ %s]\ volume\ locate\ is\ not\ Connected. = +volume[uuid\:%s]\ has\ reference\ volume[%s],\ can\ not\ change\ volume\ type\ before\ flatten\ them\ and\ their\ descendants = 硬盘[uuid:{0}]有引用硬盘[uuid:{1}],不能改变硬盘类型,请先展开该硬盘和它的子硬盘 There\ is\ no\ LocalStorage\ primary\ storage[state\=%s,status\=%s]\ on\ the\ cluster[%s],\ when\ the\ cluster\ mounts\ multiple\ primary\ storage,\ the\ system\ uses\ the\ local\ primary\ storage\ by\ default.\ Check\ the\ state/status\ of\ primary\ storage\ and\ make\ sure\ they\ have\ been\ attached\ to\ clusters = 在集群[{2}]里没有LocalStorage主存储[state={0},status={1}],当集群挂载了多个主存储的时候,系统默认的是local主存储。检查主存储的状态并确定是否连接了集群 +the\ type\ of\ primary\ storage[uuid\:%s]\ chosen\ is\ not\ local\ storage,\ check\ if\ the\ resource\ can\ be\ created\ on\ other\ storage\ when\ cluster\ has\ attached\ local\ primary\ storage = 选择的主存储[uuid:{0}]的类型不是本地存储,请检查当集群有绑定了本地存储的时候,资源是否能被创建在非本地存储中 The\ cluster\ mounts\ multiple\ primary\ storage[%s(%s),\ other\ non-LocalStorage\ primary\ storage],\ primaryStorageUuidForDataVolume\ cannot\ be\ specified\ %s = 集群绑定了多个主存储[{0}({1}), 其他的非LocalStorage主存储],主存储云盘未进行指定{2} The\ cluster[uuid\=%s]\ mounts\ multiple\ primary\ storage[LocalStorage,\ other\ non-LocalStorage\ primary\ storage],\ You\ must\ specify\ the\ primary\ storage\ where\ the\ root\ disk\ is\ located = 
集群[uuid={0}]绑定了多个主存储[LocalStorage, 其他非LocalStorage主存储],需要检验下云盘所在的主存储 The\ cluster[uuid\=%s]\ mounts\ multiple\ primary\ storage[LocalStorage,\ other\ non-LocalStorage\ primary\ storage],\ You\ must\ specify\ the\ primary\ storage\ where\ the\ data\ disk\ is\ located = 集群[uuid={0}]绑定了多个主存储[LocalStorage, 其他非LocalStorage主存储],需要检验下云盘所在的主存储。 @@ -1834,10 +2163,17 @@ no\ LocalStorageBackupStorageMediator\ supporting\ hypervisor[%s]\ and\ backup\ creation\ rely\ on\ image\ cache[uuid\:%s,\ locate\ host\ uuids\:\ [%s]],\ cannot\ create\ other\ places. = 创建依赖于镜像缓存[uuid:{0},定位物理机uuid:[{1}]],无法创建其他位置。 local\ storage\ doesn't\ support\ live\ migration\ for\ hypervisor[%s] = 本地存储不支持对虚拟化类型[{0}]进行热迁移 Can't\ attach\ volume\ to\ VM,\ no\ qualified\ cluster = 不能加载云盘到云主机上,没有可用集群 +cannot\ attach\ the\ data\ volume[uuid\:%s]\ to\ the\ vm[uuid\:%s].\ Both\ vm's\ root\ volume\ and\ the\ data\ volume\ are\ on\ local\ primary\ storage,\ but\ they\ are\ on\ different\ hosts.\ The\ root\ volume[uuid\:%s]\ is\ on\ the\ host[uuid\:%s]\ but\ the\ data\ volume[uuid\:\ %s]\ is\ on\ the\ host[uuid\:\ %s] = 不能加载硬盘[uuid:{0}]到虚拟机[uuid:{1}]上,硬盘和虚拟机的硬盘都位于本地存储,但是它们位于不同的主机上。虚拟机的硬盘位于主机[uuid:{3}]上,硬盘位于主机[uuid:{5}]上 +the\ data\ volume[name\:%s,\ uuid\:%s]\ is\ on\ the\ local\ storage[uuid\:%s];\ however,the\ host\ on\ which\ the\ data\ volume\ is\ has\ been\ deleted.\ Unable\ to\ recover\ this\ volume = 不能恢复硬盘[名称:{0},uuid:{1}],硬盘位于本地存储[uuid:{2}]上,但是该硬盘所在的主机已被删除 +unable\ to\ recover\ the\ vm[uuid\:%s,\ name\:%s].\ The\ vm's\ root\ volume\ is\ on\ the\ local\ storage[uuid\:%s];\ however,\ the\ host\ on\ which\ the\ root\ volume\ is\ has\ been\ deleted = 不能恢复虚拟机[uuid:{0},名称:{1}],虚拟机的硬盘位于本地存储[uuid:{2}]上,但是该硬盘所在的主机已被删除 +unable\ to\ live\ migrate\ vm[uuid\:%s]\ with\ data\ volumes\ on\ local\ storage.\ Need\ detach\ all\ data\ volumes\ first. 
= 不能对虚拟机[uuid:{0}]进行热迁移,该虚拟机有硬盘位于本地存储上 +unable\ to\ live\ migrate\ vm[uuid\:%s]\ with\ local\ storage.\ Only\ linux\ guest\ is\ supported.\ Current\ platform\ is\ [%s] = 不能对虚拟机[uuid:{0}]进行热迁移,该虚拟机有硬盘在本地存储上。仅 Linux 虚拟机支持热迁移, 当前平台为 [{1}] +unable\ to\ live\ migrate\ vm[uuid\:%s]\ with\ ISO\ on\ local\ storage.\ Need\ detach\ all\ ISO\ first. = 不能对虚拟机[uuid:{0}]进行热迁移,该虚拟机有ISO位于本地存储上,请先卸载ISO +To\ create\ data\ volume\ on\ the\ local\ primary\ storage,\ you\ must\ specify\ the\ host\ that\ the\ data\ volume\ is\ going\ to\ be\ created\ using\ the\ system\ tag\ [%s] = 创建本地存储上的数据盘时,必须指定创建该数据盘的主机,请使用系统标签[{0}]指定创建该数据盘的主机 the\ host[uuid\:%s]\ doesn't\ belong\ to\ the\ local\ primary\ storage[uuid\:%s] = 物理机[uuid:{0}] 不属于本地主存储[uuid:{1}] the\ local\ primary\ storage[uuid\:%s]\ has\ no\ hosts\ with\ enough\ disk\ capacity[%s\ bytes]\ required\ by\ the\ disk\ offering[uuid\:%s] = +the\ image[uuid\:%s,\ name\:\ %s]\ is\ not\ available\ to\ download\ on\ any\ backup\ storage\:\\n1.\ check\ if\ image\ is\ in\ status\ of\ Deleted\\n2.\ check\ if\ the\ backup\ storage\ on\ which\ the\ image\ is\ shown\ as\ Ready\ is\ attached\ to\ the\ zone[uuid\:%s] = 镜像[uuid:{0},名称:{1}]在任意备份存储上不可用:\\n1. 检查镜像是否处于删除状态\\n2. 
检查镜像在哪个备份存储上处于就绪状态,该备份存储是否已挂载到该区域[uuid:{2}] root\ image\ has\ been\ deleted,\ cannot\ reimage\ now = 系统镜像已经被删除,无法重制云主机 -the\ volume[uuid;%s]\ is\ attached\ to\ a\ VM[uuid\:%s]\ which\ is\ in\ state\ of\ %s,\ cannot\ do\ the\ snapshot\ merge = 云盘[uuid;{0}] 挂载到处于{2}状态的云主机,不能合并快照 why\ volume[uuid\:%s,\ installPath\:%s]\ not\ in\ directory\ %s = 为什么卷[uuid:{0},InstallPath:{1}]不在目录{2}中 cannot\ find\ flag\ file\ [%s]\ on\ host\ [%s],\ because\:\ %s = 找不到标记文件[{0}](在物理机[{1}]上),因为:{2} cannot\ create\ flag\ file\ [%s]\ on\ host\ [%s],\ because\:\ %s = 无法在物理机[{1}]上创建标志文件[{0}],因为:{2} @@ -1846,9 +2182,13 @@ unable\ to\ create\ empty\ snapshot\ volume[name\:%s,\ installpath\:\ %s]\ on\ k unable\ to\ create\ an\ empty\ volume[uuid\:%s,\ name\:%s]\ on\ the\ kvm\ host[uuid\:%s] = 不能在物理机[uuid:{2}]上创建空云盘[uuid:{0}, name:{1}] failed\ to\ download\ bits\ from\ the\ SFTP\ backup\ storage[hostname\:%s,\ path\:\ %s]\ to\ the\ local\ primary\ storage[uuid\:%s,\ path\:\ %s],\ %s = 从SFTP镜像服务器[hostname:{0}, path: {1}] 下载到本地存储[uuid:{2}, path: {3}]失败,{4} failed\ to\ upload\ bits\ from\ the\ local\ storage[uuid\:%s,\ path\:%s]\ to\ the\ SFTP\ backup\ storage[hostname\:%s,\ path\:%s],\ %s = 从本地存储[uuid:{0}, path: {1}]上传到SFTP镜像服务器[hostname:{2}, path:{3}]失败, {4} +the\ required\ host[uuid\:%s]\ cannot\ satisfy\ conditions[state\:\ %s,\ status\:\ %s,\ size\ >\ %s\ bytes],\ or\ doesn't\ belong\ to\ a\ local\ primary\ storage\ satisfying\ conditions[state\:\ %s,\ status\:\ %s],\ or\ its\ cluster\ doesn't\ attach\ to\ any\ local\ primary\ storage = 不能满足条件[状态:{1}, 状态:{2}, 大小等于{3}字节]的主机[uuid:{0}],或者该主机不属于满足条件的本地存储[状态:{4}, 状态:{5}],或者该主机所属的集群没有绑定任何本地存储 +no\ local\ primary\ storage\ in\ zone[uuid\:%s]\ can\ satisfy\ conditions[state\:\ %s,\ status\:\ %s]\ or\ contain\ hosts\ satisfying\ conditions[state\:\ %s,\ status\:\ %s,\ size\ >\ %s\ bytes] = 没有满足条件的本地存储[状态:{1}, 状态:{2}]或者该本地存储中不能满足条件的主机[状态:{3}, 状态:{4}, 大小等于{5}字节] +no\ local\ primary\ storage\ can\ satisfy\ conditions[state\:\ %s,\ status\:\ %s]\ or\ 
contain\ hosts\ satisfying\ conditions[state\:\ %s,\ status\:\ %s,\ size\ >\ %s\ bytes] = 没有满足条件的本地存储[状态:{0}, 状态:{1}]或者该本地存储中不能满足条件主机[状态:{2}, 状态:{3}, 大小大于{4}字节] {the\ physical\ capacity\ usage\ of\ the\ host[uuid\:%s]\ has\ exceeded\ the\ threshold[%s]} = '{物理机[uuid:{0}']的物理容量使用率已超过阈值[{1}]} failed\ allocate\ localstorage = 分配localStorage失败 cannot\ reserve\ enough\ space\ for\ primary\ storage[uuid\:\ %s]\ on\ host[uuid\:\ %s],\ not\ enough\ physical\ capacity = 无法为物理机[uuid:{1}]上的主存储[uuid:{0}]保留足够的空间,物理容量不足 +host[uuid\:\ %s]\ of\ local\ primary\ storage[uuid\:\ %s]\ doesn't\ have\ enough\ capacity[current\:\ %s\ bytes,\ needed\:\ %s] = 主机[uuid:{0}]上的本地存储[uuid:{1}]没有足够的容量[当前:{2}字节,所需:{3}] cannot\ find\ any\ host\ which\ has\ resource[uuid\:%s] = 找不到任何拥有资源[uuid:{0}]的物理机 Resource[uuid\:%s]\ can\ only\ be\ operated\ on\ host[uuid\:%s],\ but\ the\ host\ has\ been\ deleted = 资源[uuid:{0}]只能在物理机[uuid:{1}]上对其操作,但是该物理机已经被删除了 @@ -1928,7 +2268,12 @@ the\ vip[uuid\:%s]\ already\ has\ bound\ to\ other\ service[%s] = 该虚拟IP[uu Current\ port\ range[%s,\ %s]\ is\ conflicted\ with\ system\ service\ port\ range\ [%s,\ %s]\ with\ vip[uuid\:\ %s]\ protocol\:\ %s\ = 当前使用的端口范围[{0}, {1}]和虚拟IP[uuid: {4}, 协议: {5}]已经使用的系统服务端口范围[{2}, {3}]冲突 Current\ port\ range[%s,\ %s]\ is\ conflicted\ with\ used\ port\ range\ [%s,\ %s]\ with\ vip[uuid\:\ %s]\ protocol\:\ %s\ = 当前使用的端口范围[{0}, {1}]和虚拟IP[uuid: {4}, 协议: {5}]已经使用的端口范围[{2}, {3}]冲突
added\ is\ %s.\ = 多个 BackupStorage 在相同主机上被识别,已经存在一个 ImageStoreBackupStorage[hostname:{0}]。要添加的 BackupStorage 类型是{1} VM\ [uuid\:\ %s]\ has\ already\ been\ added\ to\ affinityGroup\ [uuid\:\ %s] = VM[uuid:{0}已经被添加到亲和组[uuid:{1}]中。] There\ are\ other\ VMs\ on\ this\ host\ [uuid\:\ %s]\ belonging\ to\ same\ affinityGroup\ [%s] = 在主机[uuid:{0}]上的虚拟机属于同一个亲和组中[{1}] affinityGroup\ [uuid\:%s]\ reserve\ host\ [uuid\:%s]\ for\ vm\ [uuid\:\ %s]\ failed = 亲和组[uuid:{0}]为虚拟机[uuid:{2}]预分配主机资源[uuid:{1}]失败 @@ -1949,14 +2294,18 @@ can\ not\ detach\ interfaces\ repeatedly\ in\ a\ bond[%s]. = unable\ to\ find\ bonding[uuid\=%s] = cannot\ delete\ bonding\ corresponding\ to\ the\ management\ network = 无法删除与管理网络对应的绑定 cannot\ delete\ bonding\ configured\ with\ vtep\ ip = 无法删除使用VTEP IP配置的绑定 +cannot\ assign\ xmit_hash_policy\ [%s]\ for\ mode\ [%s],\ because\ only\ mode\ 802.3ad\ support\ specifying\ different\ xmit_hash_policys = xmit_hash_policy\ for\ mode\ [%s]\ should\ not\ be\ null = interface\ in\ slaveNames[%s]\ does\ not\ exist\ on\ the\ hosts = there\ is\ no\ interface[%s]\ on\ host[uuid\:%s] = there\ is\ no\ slave\ interface\ on\ the\ host[uuid\:%s] = can\ not\ have\ interfaces\ in\ a\ bond\ which\ is\ not\ on\ the\ same\ host[%s]. = cannot\ bind\ with\ interface\ corresponding\ to\ the\ management\ network. 
= +bonding\ card\ can\ not\ have\ occupied\ interfaces,\ which\ was\ already\ been\ used\ by\ bonding[uuid\:%s] = 绑定卡不能有被占用的接口,该接口已被绑定[uuid:{0}]使用 +bonding\ card\ can\ not\ have\ interfaces\ that\ has\ been\ used\ as\ a\ network\ bridge,\ which\ was\ already\ been\ used\ by\ host[%s] = 绑定卡不能有被使用的网络桥接接口,该接口已被主机[uuid:{0}]使用 bonding\ card\ can\ not\ have\ interfaces\ that\ has\ been\ pass-through = bonding\ card\ can\ not\ have\ interfaces\ with\ different\ speed,\ which\ is\ on\ the\ host[%s] = +bonding\ card\ can\ not\ have\ [%s]\ interfaces,it\ must\ be\ the\ number\ between[1~8] = 绑定卡不能有[{0}]个接口,该接口数量必须在[1~8]之间 [%s]\ bonding\ card\ can\ not\ have\ [%s]\ interfaces,\ it\ must\ be\ the\ number\ between[1~8] = [%s]\ bonding\ can\ not\ have\ [%s]\ interfaces,\ it\ must\ be\ the\ number\ between[1~2] = failed\ to\ add\ linux\ bonding\ to\ host[uuid\:%s]\ \:\ %s = 无法将Linux绑定添加到主机[uuid:{0}]:{1} @@ -1989,7 +2338,9 @@ can\ not\ set\ management\ network\ on\ bonding,\ because\ management\ is\ the\ no\ available\ network\ interface\ on\ the\ host\ to\ start\ the\ vm = 主机上没有可用于启动虚拟机的网络接口 vm\ security\ level\ not\ consistent\ with\ vms\ running\ on\ host = 虚拟机安全级别与主机上运行的虚拟机不一致 fail\ to\ update\ iscsi\ initiator\ name\ of\ host[uuid\:%s] = +failed\ to\ allocate\ pci\ device\ on\ host[uuid\:%s],\ because\ there\ are\ not\ enough\ pci\ devices\ available = 无法在主机[uuid:{0}]上分配 PCI 设备,因为没有足够的 PCI 设备可用 networkInterface[name\:%s]\ of\ host[uuid\:%s]\ can\ not\ find = 找不到主机[uuid:{1}]的网络接口[名称:{0}] +only\ support\ do\ live\ snapshot\ on\ vm\ state[%s],\ but\ vm\ is\ on\ [%s]\ state = 仅支持对处于[{0}]状态的虚拟机进行快照,但虚拟机处于[{1}]状态 primary\ storage\ type\ doesn't\ support\ sync\ qos\ from\ host = primary\ storage\ type\ doesn't\ support\ set\ qos = host[uuid\:%s]\ becomes\ power\ off,\ send\ notify = @@ -2033,6 +2384,8 @@ failed\ to\ post-handle\ vf\ nic\ after\ migrate\ vm[uuid\:%s]\ to\ host[uuid\:% failed\ to\ delete\ vHost\ User\ Client\ in\ host[uuid\:%s]\ for\ vm[uuid\:%s]\ \:\ %s = 
无法删除虚拟机[uuid:{1}]的主机[uuid:{0}]中的vhost用户客户端:{2} failed\ to\ generate\ vHost\ User\ Client\ in\ host[uuid\:%s]\ for\ vm[uuid\:%s]\ \:\ %s = 无法在主机[uuid:{0}]中为VM[uuid:{1}]生成vhost用户客户端:{2} cannot\ generate\ vhost\ user\ client\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] = 无法为目标主机[uuid:{1}]上的虚拟机[uuid:{0}]生成vhost用户客户端 +could\ not\ ungenerate\ pci\ device[uuid\:%s],\ becausethere\ are\ another\ l2[uuid\:%s]\ use\ the\ physical\ network\ interface\ attached\ to\ cluster = 无法取消生成PCI设备[uuid:{0}],因为该物理网络接口被其他二层网络[uuid:{1}]使用 +could\ not\ generate\ pci\ device[uuid\:%s],\ becausethere\ are\ another\ l2[uuid\:%s]\ use\ the\ physical\ network\ interface\ attached\ to\ cluster = 无法生成PCI设备[uuid:{0}],因为该物理网络接口被其他二层网络[uuid:{1}]使用 only\ %s\ support\ vdpa = 仅{0}支持VDPA cluster[uuid\:%s]\ do\ not\ support\ ovs-dpdk = 集群[uuid:{0}]不支持OVS-DPDK l2\ network[uuid\:%s]\ in\ host[uuid\:%s]\ is\ not\ sr-iov\ virtualized = @@ -2042,6 +2395,7 @@ cannot\ generate\ vdpa\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] release\ vdpa\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] = restore\ vdpa\ for\ vm[uuid\:%s]\ from\ the\ destination\ host[uuid\:%s] = not\ dest\ host\ found\ in\ db,\ can't\ send\ change\ password\ cmd\ to\ the\ host! = 没有在主机上发现数据库,不能发送更改密码的指令到这个主机上 +not\ account\ preference\ found,\ \ send\ change\ password\ cmd\ to\ the\ host! 
= 没有发现 account preference,不能发送更改密码的指令到这个主机上 fail\ to\ attach\ virtio\ driver\ because\ read\ md5\ of\ file[%s]\ fail\ in\ mn[uuid\:%s]\:\ file\ not\ found\ on\ classpath = 无法附加virtio驱动程序,因为在Mn[uuid:{1}]中读取文件[{0}]的MD5失败:在类路径中找不到文件 fail\ to\ attach\ virtio\ driver\ because\ of\ invalid\ md5\ of\ file[%s]\ in\ mn[uuid\:%s] = 无法附加virtio驱动程序,因为Mn[uuid:{1}]中文件[{0}]的MD5无效 fail\ to\ attach\ virtio\ driver\ because\ read\ md5\ of\ file[%s]\ fail\ in\ mn[uuid\:%s]\:\ %s = 无法附加virtio驱动程序,因为在Mn[uuid:{1}]中读取文件[{0}]的MD5失败:{2} @@ -2051,16 +2405,23 @@ state\ of\ vm[uuid\:%s]\ is\ not\ in\ Running\ state,\ can\ not\ sync\ clock = V hot\ plug\ is\ not\ turned\ off,can\ not\ open\ vm\ numa = 热插拔未关闭,无法打开VM NUMA vm[uuid\:\ %s]'s\ state\ is\ not\ Stopped\ now,\ cannot\ operate\ 'changevmimage'\ action = VM[uuid:{0}]的状态现在未停止,无法执行“ ChangeVMImage ”操作 vm[uuid\:%s]\ cluster\ uuid\ is\ null,\ cannot\ change\ image\ for\ it = VM[uuid:{0}]集群uuid为空,无法更改其镜像 +vm[uuid\:%s]\ is\ in\ cluster[uuid\:%s],\ but\ there\ is\ no\ available\ host\ in\ the\ cluster,\ cannot\ change\ image\ for\ the\ vm = VM[uuid:{0}]位于集群[uuid:{1}]中,但集群中没有可用的主机,无法更改虚拟机镜像 +unable\ to\ allocate\ hosts,\ no\ host\ meets\ the\ following\ conditions\:\ clusterUuid\=%s\ hostUuid\=%s\ cpu\=%d\ memoryCapacity\=%d\ L3NetworkUuids\=%s = 无法分配主机,无满足以下条件的主机:集群uuid={0} 主机uuid={1} CPU={2} 内存容量={3} L3NetworkUuids={4} +can\ not\ find\ backup\ storage,\ unable\ to\ commit\ volume\ snapshot[psUuid\:%s]\ as\ image,\ destination\ required\ PS\ uuid\:%s = 无法找到数据存储,无法将卷快照[psUuid:{0}]作为镜像,目标需要的主存储 uuid:{1} direction\ must\ be\ set\ to\ in\ or\ out = 方法必须设置in或者out inboundBandwidth\ must\ be\ set\ no\ more\ than\ %s. = 下行带宽不能超过{0} outboundBandwidth\ must\ be\ set\ no\ more\ than\ %s. 
= 上行带宽不能超过{0} vm\ [%s]'\ state\ must\ be\ Running\ or\ Paused\ to\ sync\ nic\ qos = VM[{0}]状态必须为“正在运行”或“已暂停”才能同步NIC QoS vm\ [%s]'s\ HostUuid\ is\ null,\ cannot\ sync\ nic\ qos = VM[{0}]的Hostuuid为空,无法同步NIC QoS +not\ dest\ host\ found\ in\ db\ by\ uuid\:\ %s,\ can't\ send\ change\ password\ cmd\ to\ the\ host! = 无法在数据库中找到该主机的uuid:{0},无法向该主机发送修改密码命令 state\ is\ not\ correct\ while\ change\ password. = 该状态不支持修改密码 templated\ vm[uuid\:\ %s]\ cannot\ be\ create\ from\ vm\ with\ shareable\ volume[uuids\:\ %s] = +failed\ to\ convert\ vm\ to\ templated\ vm,\ because\ the\ vm\ has\ scheduled\ jobs\ [%s] = 转换虚拟机为模板虚拟机失败,该虚拟机有计划任务[{0}] The\ number\ of\ data\ volumes\ exceeds\ the\ limit[num\:\ %s],\ please\ reduce\ the\ number\ of\ data\ volumes\ during\ vm\ creation. = 数据云盘的数量超过限制[数量:{0}],请在创建虚拟机期间减少数据云盘的数量。 Can\ not\ set\ security\ level\ to\ not\ %s\ vm\ [uuid\:%s] = 设置密级失败,无法对不处于{0}状态的虚拟机操作[uuid:{1}] +can\ not\ set\ primaryStorageUuidForRootVolume\ or\ primaryStorageUuidForDataVolume\ or\ rootVolumeSystemTags\ or\ dataVolumeSystemTags\ when\ diskAOs\ is\ not\ empty = 不能在 diskAOs 不为空的情况下设置 primaryStorageUuidForRootVolume 或 primaryStorageUuidForDataVolume 或 rootVolumeSystemTags 或 dataVolumeSystemTags The\ operation\ only\ allows\ on\ user\ vm = 该操作仅允许在用户虚拟机上执行 +there\ are\ not\ enough\ capacity\ for\ full\ vm\ clone\ to\ vm[uuid\:\ %s],\ volumes[uuid\:\ %s]\ on\ primary\ storage[uuid\:\ %s]\ required\:\ %s\ bytes,\ current\ available\ capacity\ is\ %s\ bytes = 虚拟机克隆失败,因为存储空间不足,虚拟机[uuid:{0}]的云盘[uuid:{1}]在存储[uuid:{2}]上所需空间为 {3} 字节,当前可用空间为 {4} 字节 The\ nic\ [%s%s]\ is\ not\ mounted\ on\ the\ VM = 网卡[{0}]不能被挂载到虚拟机上 The\ operation\ only\ allows\ on\ user\ vm\ = 该操作只能在用户虚拟机上进行 The\ operation\ only\ allows\ when\ vm\ [%s]\ state\ is\ stopped\ = 该操作只有虚拟机[{0}]状态为已停止才能进行 @@ -2078,6 +2439,7 @@ the\ nic\ can't\ apply\ Qos\ with\ the\ port\ mirror\ service\ at\ same\ time. = nic\ id\:\ %s\ does\ not\ exist... 
= 网卡id: {0}不存在 The\ 'uuids'\ parameter\ must\ belong\ to\ the\ VmInstanceVO\ or\ HostVO = “ uuids ”参数必须属于vminstancevo或hostvo resource[uuids\:%s]\ is\ not\ owned\ by\ account[uuid\:%s] = 资源[uuid:{0}]不归帐户[uuid:{1}]所有 +the\ cache\ of\ a\ templated\ vmInstance[uuid\:%s]\ can\ contain\ only\ one\ or\ zero\ snapshot\ groups.\ the\ current\ number\ of\ snapshot\ groups\ is\ %d. = 模板虚拟机[uuid:{0}] 的快照组只能包含一个或零个快照组,当前快照组数量为 {1} the\ templated\ vmInstance[uuid\:%s]\ is\ not\ exist = password\ length\ must\ be\ [%s-%s] = 密码长度必须为[{0}-{1}] password\ does\ not\ match\ numbers,\ uppercase\ and\ lowercase,\ and\ special\ character\ combinations = 密码与数字、大小写和特殊字符组合不匹配 @@ -2093,14 +2455,33 @@ invalid\ cpu\ set\ [%s] = CPU集[{0}]无效 the\ host[uuid\:%s]\ already\ attached\ to\ host\ scheduling\ group[uuid\:%s] = 主机[uuid:{0}]已连接到主机调度组[uuid:{1}] host\ clusterUuid\ is\ null = 主机Clusteruuid为空 hosts\ that\ you\ can\ add\ to\ a\ host\ scheduling\ group\ must\ be\ enabled\ and\ connected\ to\ the\ MN. = 您可以添加到主机调度组的主机必须启用并连接到Mn。 +unmatched\ zone\ detected,\ host[uuid\:\ %s,\ zone\ uuid\:\ %s]'s\ zone\ is\ different\ from\ host\ sheduling\ rule\ group[uuid\:\ %s,\ zone\ uuid\:\ %s] = 未匹配的区被检测到,主机 [uuid:{0},区域uuid:{1}] 的区域与主机调度组 [uuid:{2},区域uuid:{3}] 的区域不同 vm[uuid\:%s]\ already\ attached\ to\ vm\ scheduling\ group[uuid\:%s] = 虚拟机[uuid:{0}]已连接到虚拟机调度组[uuid:{1}] +unmatched\ zone\ detected,\ vm[uuid\:\ %s,\ zone\ uuid\:\ %s]'s\ zone\ is\ different\ from\ vm\ sheduling\ rule\ group[uuid\:\ %s,\ zone\ uuid\:\ %s] = 未匹配的区被检测到,虚拟机 [uuid:{0},区域uuid:{1}] 的区域与虚拟机调度组 [uuid:{2},区域uuid:{3}] 的区域不同 vm\ can\ change\ its\ vm\ scheduling\ group\ only\ in\ state\ [%s,%s],\ but\ vm\ is\ in\ state\ [%s] = VM只能在状态[{0},{1}]下更改其VM调度组,但VM处于状态[{2}] cannot\ operate\ vpc\ vm\ scheduling\ group = 无法运行VPC虚拟机调度组 zoneUuid\ is\ not\ null = zoneUuid不为空 the\ vm\ scheduling\ group\ has\ already\ had\ a\ vms\ Affinitive\ to\ Hosts\ scheduling\ policy\ attached = 该虚拟机调度组已绑定聚集虚拟机调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group\ has\ 
already\ had\ a\ vms\ antiaffinity\ from\ hosts\ scheduling\ rule\ attached.\ the\ number\ of\ hosts\ available\ for\ the\ vm\ in\ the\ scheduling\ group\ to\ run\ is\ less\ than\ that\ of\ the\ vm\ in\ the\ group.\ you\ cannot\ attach\ a\ vm\ antiaffinity\ from\ Each\ Other\ scheduling\ rule\ to\ the\ group = 该虚拟机调度组已绑定虚拟机反亲和性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group\ has\ already\ had\ a\ vm\ antiaffinity\ from\ each\ other\ scheduling\ rule\ attached.\ the\ number\ of\ hosts\ available\ for\ the\ vm\ in\ the\ scheduling\ group\ to\ run\ is\ less\ than\ that\ of\ the\ vm\ in\ the\ group.\ you\ cannot\ attach\ a\ vms\ antiaffinity\ from\ Hosts\ scheduling\ policy\ to\ the\ group. = 该虚拟机调度组已绑定虚拟机反亲和性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ antiaffinity\ from\ each\ other\ scheduling\ rule\ attached.\ attaching\ another\ one\ is\ not\ allowed. = 该虚拟机调度组已绑定虚拟机反亲和性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ affinitive\ to\ each\ other\ scheduling\ rule\ attached.\ Attaching\ a\ vm\ antiaffinity\ from\ each\ other\ scheduling\ rule\ is\ not\ allowed. = 该虚拟机调度组 {0} 已绑定虚拟机亲和性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ affinitive\ to\ each\ other\ scheduling\ rule\ attached.attaching\ another\ one\ is\ not\ allowed. = 该虚拟机调度组 {0} 已绑定虚拟机亲和性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ exclusive\ from\ each\ other\ scheduling\ rule\ attached.\ Attaching\ a\ vm\ affinitive\ to\ each\ other\ scheduling\ policy\ is\ not\ allowed. 
= 该虚拟机调度组 {0} 已绑定虚拟机独占性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ executed\ exclusive\ vm\ or\ affinitive\ vm\ scheduling\ policy\ attached.\ you\ cannot\ attach\ either\ of\ the\ two\ scheduling\ policies\ that\ require\ execution\ to\ the\ group\ again = 该虚拟机调度组 {0} 已绑定虚拟机执行调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vms\ affinitive\ to\ hosts\ scheduling\ rule\ attached.\ you\ cannot\ attach\ another\ one\ to\ the\ group\ again. = 该虚拟机调度组 {0} 已绑定虚拟机亲和性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ antiaffinity\ from\ host\ scheduling\ rule\ attached.\ you\ cannot\ attach\ a\ vms\ affinitive\ to\ host\ scheduling\ rule\ to\ the\ group. = 该虚拟机调度组 {0} 已绑定虚拟机反亲和性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group[uuid\:%s]\ has\ already\ had\ a\ vm\ affinitive\ to\ hosts\ scheduling\ rule\ attached.\ you\ cannot\ attach\ a\ vm\ antiaffinity\ from\ hosts\ scheduling\ rule\ to\ the\ group. = 该虚拟机调度组 {0} 已绑定虚拟机亲和性调度策略,不可绑定其它虚拟机调度策略 +the\ vm\ scheduling\ group\ has\ already\ had\ a\ vm\ antiaffinity\ from\ each\ other\ scheduling\ rule\ attached.\ the\ number\ of\ hosts\ available\ for\ the\ vm\ in\ the\ scheduling\ group\ to\ run\ is\ less\ than\ that\ of\ the\ vm\ in\ the\ group.\ you\ cannot\ attach\ a\ vms\ affinitive\ to\ hosts\ scheduling\ policy\ to\ the\ group. = 该虚拟机调度组已绑定虚拟机反亲和性调度策略,无法绑定其它虚拟机调度策略 can\ not\ satisfied\ vm\ scheduling\ rule\ group\ conditions = 无法满足VM调度规则组条件 vm\ scheduling\ group[uuid\:%s]\ reserve\ host\ [uuid\:%s]\ for\ vm\ [uuid\:\ %s]\ failed = 虚拟机调度组[uuid:{0}]为虚拟机[uuid:{2}]保留主机[UuId:{1}]失败 +vm[uuid\:%s]\ is\ now\ running\ on\ host[uuid\:%s],which\ does\ not\ comply\ with\ the\ scheduling\ rule\ associated\ with\ vm\ scheduling\ group[uuid\:%s]. 
= 虚拟机[uuid:{0}]现在运行在主机[uuid:{1}],该主机不符合虚拟机调度组[uuid:{2}]关联的调度规则。 hostGroup[uuid\:%s]\ is\ no\ host = 主机组[uuid:{0}]不是主机 +vm[uuid\:%s]\ is\ now\ running\ on\ host[uuid\:%s],\ which\ does\ not\ comply\ with\ the\ scheduling\ rule[%s]\ associated\ with\ vm\ scheduling\ group[uuid\:%s]. = 虚拟机[uuid:{0}]现在运行在主机[uuid:{1}],该主机不符合虚拟机调度组[uuid:{3}]关联的调度规则[{2}] +vm[uuid\:%s]\ is\ now\ running\ on\ host[uuid\:%s],which\ does\ not\ comply\ with\ the\ scheduling\ rule[%s]\ associated\ with\ vm\ scheduling\ group[uuid\:%s]. = 虚拟机[uuid:{0}]现在运行在主机[uuid:{1}],该主机不符合虚拟机调度组[uuid:{3}]关联的调度规则[{2}] +cannot\ find\ the\ host\ scheduling\ group[uuid\:%s],\ it\ may\ have\ been\ deleted = 无法找到主机调度组[uuid:{0}],可能已被删除 +cannot\ find\ the\ vm\ scheduling\ rule[uuid\:%s],\ it\ may\ have\ been\ deleted = 无法找到虚拟机调度规则[uuid:{0}],可能已被删除 +cannot\ find\ the\ vm\ scheduling\ group[uuid\:%s],\ it\ may\ have\ been\ deleted = 无法找到虚拟机调度组[uuid:{0}],可能已被删除 failed\ to\ parse\ API\ message\:\ can\ not\ parse\ encryption\ param\ with\ type\ %s = 未能分析API消息:无法分析类型为{0}的加密参数 failed\ to\ parse\ API\ message\:\ cipher\ text\ can\ not\ be\ parsed,\ type\=%s = 未能分析 API 消息:解析 {0} 类型的密文失败 failed\ to\ parse\ API\ message\:\ found\ %d\ encryption\ param\ system\ tags,\ expect\ 1 = 未能分析API消息:找到{0}个加密参数系统标记,应为1个 @@ -2141,6 +2522,7 @@ enter\ the\ new\ value\ here,\ empty\ means\ no\ change. = 在此输入新值, some\ error\ happened,\ skip\ management\ node\ power\ off = 某些错误发生,跳过管理节点关机 Failed\ to\ set\ security\ level,\ because\ security\ level\ is\ disabled. = 设置密级失败,因为密级功能已禁用 Unknown\ security\ level\ code[%s],\ supported\ values\ are\ %s = 未知的密级[{0}],支持的值有[{1}] +Parse\ license\ error,\\n1.\ check\ your\ private\ key\ and\ application\ code\ is\ correct\\n2.\ check\ your\ license\ is\ not\ corrupted\\n3.\ use\ zstack-ctl\ clear_license\ to\ clear\ your\ licenses\ and\ try\ to\ reinstall\\n = 解析许可证错误,\\n1. 检查您的私钥和应用程序代码是否正确\\n2. 检查您的许可证是否损坏\\n3. 
使用zstack-ctl clear_license来清除您的许可证并重新安装 the\ licenseRequestCode\ is\ illegal = 许可证请求代码不合法 Unexpected\ decoded\ license\ file\ length\:\ %d = 意外的解码许可证文件长度:{0} Decode\ fail\ because\ %s = 解码失败,因为{0} @@ -2167,6 +2549,8 @@ issue\ date\ of\ platform\ license\ is\ earlier\ than\ the\ existing\ license\ i add-on\ license\ is\ not\ support\ when\ license\ is\ Community\ /\ Trial = 社区版下不支持增值模块许可证 failed\ to\ update\ license = 更新许可证失败 not\ supported\:\ delete\ license[%s]\ from\ USB-key = 不支持:从USB-KEY删除许可证[{0}] +Hybrid\ platform\ license\ is\ already\ in\ use.\ You\ should\ remove\ Hybrid\ platform\ license\ and\ hybird\ add-ons\ license\ at\ the\ same\ timeby\ DeleteLicenseAction\ with\ Hybrid\ license\ UUID[uuid\=%s] = 混合平台许可证已使用。您应该同时删除混合平台许可证和混合模块许可证 +The\ system's\ thumbprint\ has\ changed.\\n\ Detailed\ errors\:\ %s.\\n\ If\ you\ are\ setting\ up\ a\ new\ system\ or\ changing\ an\ existing\ system,\ please\ follow\ the\ commands\ below\:\\n\ 1.\ run\ `zstack-ctl\ clear_license`\ to\ clear\ and\ backup\ old\ license\ files\\n\ \ \ \ or\ delete\ the\ license\ file\ on\ path\ %s\ 2.\ contact\ sales@zstack.io\ to\ apply\ a\ license;\\n\ 3.\ run\ `zstack-ctl\ install_license\ -f\ path/to/your/license`;\\n\ 4.\ run\ `zstack-ctl\ start`\ to\ start\ management\ node.\\n = 系统的指纹已改变,详情如下 {0}。如果是新系统或正在改变一个现有的系统,请按照以下命令进行操作:\\n 1. 运行 zstack-ctl clear_license 来清除并备份旧的许可证文件或者删除路径 {1} 的许可证文件 2. 联系 sales@zstack.io 申请许可证; 3. 执行 `zstack-ctl install_license -f path/to/your/license`; 4. 执行 `zstack-ctl start` 启动管理节点 Unexpected\ thumbprint = 未期望的错误 Platform\ license\ expired. = 平台许可证已过期 Found\ Xinchuang\ host,\ but\ the\ type\ of\ license\ does\ not\ match. 
= 当前许可证不支持信创类型的主机 @@ -2187,6 +2571,7 @@ invalid\ volume\ IOPS[%s]\ is\ larger\ than\ %d = 云盘IOPS[{0}]大于{1}是无 L3\ network[uuid\:%s]\ not\ found.\ Please\ correct\ your\ system\ tag[%s]\ of\ static\ IP = 找不到三层网络[uuid:0]。请确认静态IP的系统标签 Unknown\ code[%s]\ of\ Security\ Level = 安全级别的未知代码[{0}] [%s]\ is\ not\ a\ standard\ cidr = [{0}]不是标准CIDR +the\ host[uuid\:%s]'s\ operating\ system\ %s\ %s\ is\ too\ old,\ the\ QEMU\ doesn't\ support\ QoS\ of\ network\ or\ disk\ IO.\ Please\ choose\ another\ instance\ offering\ with\ no\ QoS\ configuration = 宿主机[uuid:{0}]的操作系统{1} {2}过旧,QEMU不支持网络或磁盘IO的QoS。请选择另一个实例规格,该实例规格没有QoS配置 invalid\ value[%s],\ it\ must\ be\ a\ double\ greater\ than\ 0 = 错误的值[{0}],必须是一个大于0的双精度值 invalid\ value[%s],\ it\ must\ be\ a\ double\ between\ (0,\ 1] = 错误的值[{0}],这个必须在0~1之间的双精度值 invalid\ value[%s],\ it's\ not\ a\ double = 错误的值[{0}],这个不是双精度值 @@ -2201,6 +2586,7 @@ can\ not\ find\ node\ A\ address\ info\ from\ bootstrap\ agent = 无法从启动 can\ not\ get\ bootstrap\ job\ %s\ result\ after\ 900s = 无法在900秒后获取引导程序作业{0}结果 curl\ bootstrap\ agent\ finished,\ return\ code\:\ %s,\ stdout\:\ %s,\ stderr\:\ %s = cURL引导代理已完成,返回代码:{0},标准输出:{1},标准错误:{2} VM\ instance[uuid\:\ %s]\ not\ found = +failed\ to\ create\ cache\ for\ templated\ vmInstance\ %s,\ because\ %s = 创建模板虚拟机 {0} 的缓存失败,因为 {1} all\ management\ node\ update\ factory\ mode\ failed,\ details\:\ %s = 所有管理节点更新工厂模式失败,详细信息:{0} node\ A\ update\ factory\ mode\ failed,\ details\:\ %s = 节点A更新工厂模式失败,详细信息:{0} some\ node\ on\ factory\ mode\ exists,\ detail\ of\ arping\:\ %s = 工厂模式上的某些节点存在,ARPING的详细信息:{0} @@ -2216,9 +2602,13 @@ API[%s]\ is\ not\ allowed\ for\ the\ ZSV\ license = API[%s]\ is\ not\ allowed\ for\ the\ community-source\ license,\ please\ apply\ an\ enterprise\ license = Shareable\ Volume[uuid\:%s]\ has\ already\ been\ attached\ to\ VM[uuid\:%s] = 共享云盘[uuid:{0}]已经挂载到虚拟机[uuid:{1}]上 shareable\ disk\ only\ support\ virtio-scsi\ type\ for\ now = 目前共享盘只支持virtio-scsi +shareable\ volume(s)[uuid\:\ %s]\ attached,\ not\ support\ to\ 
group\ snapshot. = 共享云盘 [uuid:{0}] 已经挂载,不支持快照组 the\ license\ has\ been\ expired,\ please\ renew\ it = invalid\ volume[%s]\ iothread\ pin[%s]! = Failed\ set\ iothread[%d]\ pin[%s]\ on\ vm[%s]\:\ %s. = 无法在VM[{2}]上设置IOThread[{0}]Pin[{1}]:{3}。 +can\ not\ found\ in\ used\ snapshot\ tree\ of\ volume[uuid\:\ %s].\ Maybe\ no\ snapshot\ chain\ need\ to\ validate. = 无法在云盘 [uuid:{0}] 的已使用快照树中找到快照。可能快照链不需要验证 +can\ not\ found\ latest\ snapshot\ from\ tree[uuid\:\ %s]\ of\ volume[uuid\:\ %s].\ Maybe\ no\ snapshot\ chain\ need\ to\ validate. = 无法在云盘 [uuid:{1}] 的快照树 [uuid: {0}] 中找到最新快照。可能快照链不需要验证 +can\ not\ found\ snapshots\ from\ tree[uuid\:\ %s]\ of\ volume[uuid\:\ %s].\ Maybe\ no\ snapshot\ chain\ need\ to\ validate. = 无法在云盘 [uuid:{1}] 的快照树 [uuid: {0}] 中找到快照。可能快照链不需要验证 Unexpectedly,\ VM[uuid\:%s]\ is\ not\ running\ any\ more,\ please\ try\ again\ later = 意外的是,VM[uuid:{0}]不再运行,请稍后再试 How\ can\ a\ Running\ VM[uuid\:%s]\ has\ no\ hostUuid? = 正在运行的VM[uuid:{0}]怎么会没有HOSTuuid? can\ not\ take\ snapshot\ for\ volumes[%s]\ while\ volume[uuid\:\ %s]\ not\ attached = 当云盘[uuid:{1}]未加载时,无法给云盘[{0}]创建快照 @@ -2227,6 +2617,7 @@ can\ not\ take\ snapshot\ for\ volumes[%s]\ attached\ multiple\ vms[%s,\ %s] = no\ volumes\ found = 找不到云盘 this\ snapshot\ recording\ the\ volume\ state\ before\ resize\ to\ %fG\ is\ created\ automatically = 该快照记录云盘扩容到{0}G之前的状态,由系统自动创建 DeleteVolumeQos\ [%s]\ ignore\ because\ of\ account\ privilege. = DeleteVolumeQoS[{0}]由于帐户权限而忽略。 +Cannot\ delete\ vm's\ volume\ qos\ on\ host\ %s,\ because\ the\ current\ vm\ is\ in\ state\ of\ %s,\ but\ support\ expect\ states\ are\ [%s,\ %s] = 不能在HOST [uuid:{0}] 上删除VM [uuid:{1}] 的云盘QoS,因为VM当前状态为 {2},但支持的期望状态为[{3}, {4}] SetVolumeQosMsg\ version\ 1\ is\ deprecated,\ please\ use\ version\ 2 = non\ admin\ account\ cannot\ set\ bandwidth\ more\ than\ %s = 非管理员帐户无法设置大于{0}的带宽 unknown\ message\ version. 
= 未知消息版本。 @@ -2246,6 +2637,7 @@ failed\ to\ detach\ shareable\ volume[uuid\:%s]\ from\ VmInstance[uuid\:%s] = failed\ to\ detach\ shareable\ volume\ from\ VmInstance\:[\\n%s] = 不能卸载虚拟机上的共享盘,原因是{0} unsupported\ operation\ for\ setting\ root\ volume[%s]\ multiQueues. = 不支持设置根卷[{0}]多队列的操作。 unsupported\ operation\ for\ setting\ virtio-scsi\ volume[%s]\ multiQueues. = 不支持设置virtio-SCSI卷[{0}]多队列的操作。 +ZStack\ has\ been\ paused,\ reject\ all\ API\ which\ are\ not\ read\ only.\ If\ you\ really\ want\ to\ call\ it\ and\ known\ the\ consequence,\ add\ '%s'\ into\ systemTags. = ZStack已暂停,拒绝所有非只读API。如果您确定要调用它,并已了解其结果的含义,请将''{0}''添加到系统标签中。 the\ current\ version\ of\ license\ does\ not\ support\ modifying\ this\ global\ config\ [name\:%s] = 当前license版本不支持修改此全局设置[name:{0}] the\ current\ version\ of\ license\ does\ not\ support\ modifying\ this\ resource\ config\ [name\:%s] = 当前版本的许可证不支持修改此资源配置[名称:{0}] cannot\ find\ mode\ from\ null\ VolumeQos = 无法从NULL卷中找到模式QoS @@ -2256,6 +2648,7 @@ the\ resource[type\:%s]\ doesn't\ have\ any\ monitoring\ items = 该资源[type: the\ resource[uuid\:%s]\ doesn't\ belong\ to\ the\ account[uuid\:%s] = 该资源[uuid:{0}]不属于账户[uuid:{1}] cannot\ find\ type\ for\ the\ resource[uuid\:%s] = 未找到资源[uuid:{0}]这种类型 no\ monitoring\ item\ found\ for\ the\ resourceType[%s]\ and\ item[%s] = 未找到资源类型[{0}]和条目[{1}]这种监控条目 +A\ resource[name\:{resourceName},\ uuid\:{resourceUuid},\ type\:{resourceType}]'s\ monitoring\ trigger[uuid\:{triggerUuid}]\ changes\ status\ to\ {triggerStatus} = 一个资源[name:'{resourceName}', uuid:'{resourceUuid}', type:'{resourceType}']''s monitoring 触发器 [uuid:'{triggerUuid}'] 状态改为 '{triggerStatus}' \\n\=\=\=\ BELOW\ ARE\ DETAILS\ OF\ THE\ PREVIOUS\ ALERT\ \=\=\= = \\n=== 以下是上一次警告内容 === \\nalert\ details\: = \\n警告内容: \\ncondition\:\ {itemName}\ {operator}\ {threshold} = \\n环境: '{itemName}' '{operator}' '{threshold}' @@ -2265,6 +2658,7 @@ VM\ CPU\ utilization = 虚拟机CPU使用率 The\ problem\ may\ be\ caused\ by\ an\ incorrect\ user\ name\ or\ password\ or\ email\ 
permission\ denied = 导致操作失败的原因可能是不正确的用户名、密码或邮件访问权限不足 Couldn't\ connect\ to\ host,\ port\:\ %s,\ %d.\ The\ problem\ may\ be\ caused\ by\ an\ incorrect\ smtpServer\ or\ smtpPort = 连接{0}:{1}超时,导致原因可能是不正确的邮件服务器和邮件服务器端口 conflict\ alert\ rule[%s],\ there\ has\ been\ a\ rule[%s]\ with\ the\ same\ name = 冲突提示规则[{0}],这里已经存在和它一样名称的规则 +ALERT\:\\n\ resource[name\:\ %s,\ uuid\:\ %s,\ type\:\ %s]\\nevent\:\ %s\ %s\ %s\\ncurrent\ value\:\ %s\\nduration\:\ %s\ seconds\\n = 告警:\\n 资源[name: {0}, uuid: {1}, type: {2}]\\nevent: {3} {4} {5}\\n当前值: {6}\\n持续时间: {7} 秒\\n the\ relativeTime[%s]\ is\ invalid,\ it\ must\ be\ in\ format\ of,\ for\ example,\ 10s,\ 1h = 相关时间[{0}]不合法,格式必须例如10s,1h the\ relativeTime[%s]\ is\ invalid,\ it's\ too\ big = 相关时间[{0}]不合法,值''s 过大 CPU\ number = CPU数量 @@ -2330,6 +2724,7 @@ cannot\ sr-iov\ virtualize\ pci\ devices\ on\ interface[uuid\:%s]\ that\ are\ be pci\ device[uuid\:%s]\ doesn't\ exist\ or\ is\ not\ sriov\ virtualized = PCI设备[uuid:{0}]不存在,或者未处于SRIOV虚拟化状态 virtual\ pci\ devices\ generated\ from\ pci\ devices\ in\ host[uuid\:%s]\ still\ attached\ to\ vm = 主机[uuid:{0}]上存在仍处于已挂载状态的虚拟PCI设备,无法执行虚拟化还原操作 sub-devices\ of\ pci\ device[uuid\:%s]\ are\ attached\ to\ paused\ VMs[uuids\:%s],\ please\ detach\ them\ first = PCI设备[uuid:{0}]的子设备已挂载到暂停的虚拟机[uuids:{1}],请先卸载它们 +pci\ device[uuid\:%s]\ cannot\ be\ virtualized\ into\ mdevs,\ make\ sure\ it's\ enabled\ and\ un-attached = PCI设备 [uuid:{0}] 无法被 MDEV 虚拟化切分,请确保设备已启用且未挂载 pci\ device[uuid\:%s]\ cannot\ be\ virtualized\ by\ mdev\ spec[uuid\:%s] = PCI设备[uuid:{0}]无法使用MDEV设备规格[uuid:{1}]进行虚拟化切分 pci\ device[uuid\:%s]\ is\ not\ virtualized\ into\ mdevs = PCI设备[uuid:{0}]未处于VFIO_MDEV虚拟化状态 mdev\ devices\ generated\ from\ pci\ device[uuid\:%s]\ still\ attached\ to\ vm = PCI设备[uuid:{0}]切分出的MDEV设备仍处于已挂载状态,无法执行虚拟化还原操作 @@ -2337,6 +2732,7 @@ the\ host[uuid\:%s]\ that\ pci\ device[uuid\:%s]\ in\ is\ not\ Connected = PCI please\ umount\ all\ GPU\ devices\ of\ the\ vm[%s]\ and\ try\ again = 请卸载虚拟机[{0}]的所有GPU设备,然后重试 please\ umount\ all\ 
vGPU\ devices\ of\ the\ vm[%s]\ and\ try\ again = 请卸载虚拟机[{0}]的所有vGPU设备,然后重试 please\ umount\ other\ pci\ devices\ of\ the\ vm[%s]\ and\ try\ again = 请卸载VM[{0}]的其他PCI设备,然后重试 +specified\ pci\ devices\ are\ not\ on\ the\ same\ host\:\ pci\ device[uuid\:\ %s]\ on\ host[uuid\:\ %s]\ while\ pci\ device[uuid\:\ %s]\ on\ host[uuid\:\ %s] = 指定的PCI设备不是在同一个主机上: PCI 设备[uuid:{0}] 在主机[uuid:{1}]上,而PCI 设备[uuid:{2}] 在主机[uuid:{3}]上 the\ PCI\ devices[uuid\:%s]\ is\ not\ on\ this\ host = failed\ to\ start\ vm[uuid\:%s]\ because\ not\ all\ pci\ specs[uuids\:%s]\ exist = 虚拟机[uuid:{0}]启动失败,因为所设置的PCI设备规格[uuids:{1}]中有部分不存在 not\ enough\ PCI\ devices = @@ -2354,6 +2750,7 @@ pci\ device\ spec[uuid\:%s]\ doesn't\ exists = PCI设备规范[uuid:{0}]不存 illegal\ type[%s]\ for\ pci\ device\ spec,\ only\ %s\ are\ legal = PCI设备规范的类型[{0}]非法,只有{1}合法 pci\ device[uuid\:%s]\ doesn't\ exist\ or\ is\ disabled\ for\ vm[uuid\:%s] = PCI设备[uuid:{0}]不存在或已为虚拟机[uuid:{1}]禁用 pci\ device[uuid\:%s]\ can\ not\ attach\ to\ vm[uuid\:%s]\ due\ to\ wrong\ status = 由于状态错误,PCI设备[uuid:{0}]无法连接到虚拟机[uuid:{1}] +The\ host\ [%s]\ has\ failed\ to\ enter\ the\ maintenance,\ The\ vm\ [%s]\ cannot\ migrate\ automatically\ because\ it\ contains\ the\ PCI\ device = 主机[uuid:{0}]已进入维护模式,虚拟机[uuid:{1}]无法自动迁移,因为它包含PCI设备 don't\ set\ rom\ version\ if\ has\ no\ rom\ content = too\ large\ pci\ device\ rom\ file = don't\ set\ rom\ content/version\ if\ you\ want\ to\ abandon\ rom\ from\ the\ spec = @@ -2394,9 +2791,11 @@ failed\ to\ get\ candidate\ hosts\ to\ start\ vm[uuid\:%s],\ %s = 无法为虚 pci\ device[uuid\:%s]\ is\ known\ as\ %s,\ but\ cannot\ find\ it's\ mdev\ spec,\ so\ abort. 
= PCI设备[uuid:{0}]是{1},但无法找到可用的MDEV设备规格 failed\ to\ start\ vm[uuid\:%s]\ because\ not\ all\ mdev\ specs[uuids\:%s]\ exist = 虚拟机[uuid:{0}]启动失败,由于所设置的MDEV设备规格[uuids:{1}]中有部分不存在 not\ enough\ Mdev\ devices = +specified\ mdev\ devices\ not\ on\ same\ host\:\ mdev\ device[uuid\:\ %s]\ on\ host[uuid\:\ %s]\ while\ mdev\ device[uuid\:\ %s]\ on\ host[uuid\:\ %s] = 指定的 MDEV 设备不在同一个主机上:MDEV 设备[uuid: {0}]在主机[uuid: {1}]上,而 MDEV 设备[uuid: {2}]在主机[uuid: {3}]上 the\ Mdev\ devices[uuid\:%s]\ is\ not\ on\ this\ host = IOMMU\ state\ is\ not\ enabled = IOMMU\ status\ is\ not\ active = +The\ host\ [%s]\ has\ failed\ to\ enter\ the\ maintenance,\ because\ vm[%s]\ has\ mdev\ devices\ attached\ and\ cannot\ migrate\ automatically = 主机[uuid:{0}]无法进入维护模式,因为虚拟机[uuid:{1}]有MDEV设备已连接,无法自动迁移 failed\ to\ find\ enough\ mdev\ device\ of\ spec[uuid\:%s]\ in\ dest\ host[uuid\:%s]\ for\ vm[uuid\:%s] = 无法在主机[uuid:{1}]上为虚拟机[uuid:{2}]找到足够多满足规格[uuid:{0}]的MDEV设备 cannot\ find\ mdev\ device[uuid\:%s],\ it\ may\ have\ been\ deleted = 找不到MDEV设备[uuid:{0}] mdev\ device[uuid\:%s]\ doesn't\ exist\ or\ is\ disabled\ for\ vm[uuid\:%s] = MDEV设备[uuid:{0}]不存在或已为VM[uuid:{1}]禁用 @@ -2432,13 +2831,14 @@ trigger\ job[uuid\:\ %s]\ failed,\ because\ %s = 触发器作业[uuid:{0}]失 trigger\ job\ group[uuid\:\ %s]\ failed,\ because\ %s = field[%s]\ cannot\ be\ empty = 字段[{0}]不能为空 the\ volume[%s]\ is\ not\ root\ volume = 卷[{0}]不是根卷 +the\ vm\ of\ the\ root\ volume[%s]\ is\ not\ available.\ check\ if\ the\ vm\ exists. = 虚拟机的根盘 {0} 不存在,请检查虚拟机是否存在 snapshotGroupMaxNumber\ \:\ %s\ format\ error\ because\ %s = SnapshotGroupMaxNumber:{0}格式错误,原因是{1} +the\ volume[%s]\ is\ not\ available.\ check\ if\ the\ volume\ exists. 
= 盘[{0}]不存在,请检查卷是否存在 the\ volume[%s]\ does\ not\ support\ snapshots\ retention = 卷[{0}]不支持快照保留 snapshotMaxNumber\ \:\ %s\ format\ error\ because\ %s = snapshotMaxNumber : {0} 转换类型失败,因为{1} +the\ vm\ of\ the\ root\ volume[%s]\ state\ in\ Destroyed.\ job\ state\ change\ is\ not\ allowed = 虚拟机的根盘 {0} 状态为Destroyed,不允许更改状态 volume[uuid\:%s]\ is\ deleted,\ state\ change\ is\ not\ allowed = 卷[uuid:{0}]已删除,不允许更改状态 vm[uuid\:%s]\ is\ destroyed,\ state\ change\ is\ not\ allowed = VM[uuid:{0}]已销毁,不允许更改状态 -unable\ to\ allocate\ backup\ storage\ specified\ by\ uuids\:\ %s,\ becasue\:\ %s = -No\ backup\ storage\ to\ commit\ volume\ [uuid\:\ %s] = unable\ to\ commit\ backup\ storage\ specified\ by\ uuids\:\ %s = unable\ to\ connect\ to\ SimpleHttpBackupStorage[url\:%s],\ because\ %s = 无法连接到SimpleHttpBackupStorage[url:{0}],因为{1} Missing\ cert\ file\ for\ downloading\ image\:\ %s = 下载镜像时证书文件丢失 @@ -2455,6 +2855,7 @@ image[%s]\ not\ found\ on\ backup\ storage[%s] = 在备份存储[{1}]上找不 the\ backup\ storage[uuid\:%s]\ has\ not\ enough\ capacity[%s]\ to\ export = 备份存储[uuid:{0}]没有足够的容量[{1}]用于导出 image\ store\ [%s]\ cannot\ add\ image,\ because\ it\ is\ used\ for\ backup\ remote = ImageStore[{0}]不能添加镜像,因为它已经被远程镜像使用 commercial\ license\ is\ required\ to\ use\ ImageStore = 使用ImageStore需要商业许可证 +the\ uuid\ of\ imagestoreBackupStorage\ agent\ changed[expected\:%s,\ actual\:%s],\ it's\ most\ likely\ the\ agent\ was\ manually\ restarted.\ Issue\ a\ reconnect\ to\ sync\ the\ status = ImageStoreBackupStorage的代理的uuid已改变[期望:{0},实际:{1}],这可能是代理被手动重启了。请重新连接以同步状态 get\ image\ hash\ failed,\ because\:%s = unable\ to\ reconnect\ target\ server\:\ %s,\ detail\ error\ info\:\ %s = miss\ image\ path\ on\ bs[%s] = @@ -2505,6 +2906,8 @@ the\ volume[uuid\:%s]\ is\ still\ attached\ on\ vm[uuid\:%s],\ please\ detach\ i cannot\ migrate\ data\ volume[uuid\:%s]\ bewteen\ sharedblock\ primary\ storages\ when\ vm[vmuuid\:%s]\ instance\ is\ not\ stopped. 
= VM[vmuuid:{1}]实例未停止时,无法在SharedBlock主存储之间迁移数据云盘[uuid:{0}]。 do\ not\ support\ storage\ migration\ while\ shared\ volume[uuid\:\ %s,\ name\:\ %s]\ attached = 加载了共享云盘[uuid:{0}, name:{1}]不支持存储迁移 Cannot\ migrate\ volume\ from\ %s\ to\ %s. = 不能从{0}迁移云盘到{1} +can\ not\ migrate\ volume[%s],\ because\ volume\ state\ is\ Disabled = 不能迁移硬盘[uuid:{0}],硬盘状态为禁用 +there\ are\ not\ enough\ capacity\ for\ vm[uuid\:\ %s]\ storage\ migration,\ required\ capacity(include\ image\ cache)\:\ %s,\ current\ available\ physical\ capacity\:\ %s = 不能为虚拟机[uuid:{0}]进行存储迁移,所需容量(包括镜像缓存):{1},当前可用物理容量:{2} not\ support\ vm\ state[%s]\ to\ do\ storage\ migration = 虚拟机状态为[{0}],无法进行存储迁移 unsupported\ storage\ migration\ type\:\ from\ %s\ to\ %s = 不支持的存储迁移类型:从{0}到{1} not\ support\ to\ cancel\ %s = 不支持取消{0} @@ -2516,12 +2919,14 @@ Failed\ to\ migrate\ Image\ %s\ from\ BS\ %s\ to\ BS\ %s.\ cause\:\ %s = 无法 can\ not\ find\ volume\ path\ from\ snapshot\ install\ path[%s]\ by\ regex[%s] = vm[uuid\:%s]\ storage\ migration\ long\ job[uuid\:%s]\ failed\ because\ management\ node\ was\ restarted = 虚拟机[uuid:{0}]存储迁移长作业[uuid:{1}]失败,因为管理节点已重新启动 The\ type\ [%s]\ of\ volume\ is\ invalid. 
= 卷的类型[{0}]无效。 +found\ trashId(%s)\ in\ primaryStorage\ [%s]\ for\ the\ migrate\ installPath[%s].\ please\ clean\ it\ first\ by\ 'APICleanUpTrashOnPrimaryStorageMsg'\ if\ you\ insist\ to\ migrate\ the\ volume[%s] = 在主存储[{1}]的回收数据({0})中己存在要迁移的目标路径[{2}],如果要继续迁移卷[{3}],请先调用''APICleanUpTrashOnPrimaryStorageMsg''来手动清理该回收数据 cannot\ find\ any\ connected\ host\ to\ perform\ the\ storage\ migration\ operation = 为了执行存储迁移操作,未找到连接的主机 found\ trashId(%s)\ in\ PrimaryStorage\ [%s]\ for\ the\ migrate\ installPath[%s].\ Please\ clean\ it\ first\ by\ 'APICleanUpTrashOnPrimaryStorageMsg'\ if\ you\ insist\ to\ migrate\ the\ volume[%s] = 在主存储[{1}]的回收数据({0})中己存在要迁移的目标路径[{2}],如果要继续迁移云盘[{3}],请先调用''APICleanUpTrashOnPrimaryStorageMsg''来手动清理该回收数据 found\ related\ trash\ paths(%s)\ in\ PrimaryStorage\ [%s]\ for\ the\ migrate\ installPath[%s].\ Please\ clean\ them\ first\ by\ 'APICleanUpTrashOnPrimaryStorageMsg'\ if\ you\ insist\ to\ migrate\ the\ volume[%s] = -couldn’t\ find\ any\ BackupStorage\ that\ is\ connected\ and\ enabled\ for\ commiting\ volume\ [uuid\:%s] = +volume[uuid\:%s]\ has\ image[uuid\:%s]\ dependency,\ other\ dependency\ image[%s] = 盘[uuid:{0}]有镜像[uuid:{1}]依赖,其他依赖镜像有[{2}] CephPrimaryStorage[%s]\ not\ existed! = Ceph镜像服务器监控节点[{0}]不存在 current\ license[%s]\ is\ not\ valid\ license\ while\ download\ from\ imagestore\ backupstorage = 当从ImageStore镜像服务器进行下载操作,当前证书[{0}]是无效的证书 +The\ source\ vm\ has\ local\ data\ volume\ on\ host[uuid\:\ %s],but\ in\ fast\ clone\ api\ msg\ try\ to\ clone\ vm\ to\ host[%s],\ which\ is\ impossible\ for\ fast\ clone\ feature. 
= 源虚拟机在主机[uuid:{0}]上有本地数据云盘,但快速克隆API消息中尝试将虚拟机克隆到主机[{1}]: 快速克隆功能不支持该操作 System\ can't\ find\ imagestore\ backup\ Storage.\ Please\ do\ not\ set\ imagestore\ backup\ Storage\ server\ IP\ to\ localhost(127.*.*.*), = 系统找不到镜像仓库镜像服务器。请不要设置镜像服务器IP为localhost(127.*.*.*) %s\ failed\ to\ download\ bits\ from\ the\ imagestore\ backup\ storage[hostname\:%s,\ path\:\ %s]\ to\ the\ local\ primary\ storage[uuid\:%s,\ path\:\ %s],\ %s = {0}从镜像仓库镜像服务器[hostname:{1}, path: {2}]到本地主存储[uuid:{3}, path: {4}]下载失败,{5} failed\ to\ upload\ bits\ from\ the\ local\ storage[uuid\:%s,\ path\:%s]\ to\ image\ store\ [hostname\:%s],\ %s = 无法从本地存储[uuid:{0}, path:{1}]上传数据到镜像仓库[主机名:{2}],因为{3} @@ -2532,16 +2937,23 @@ failed\ to\ get\ primaryStorage[%s]\ license\ info,\ because\ no\ data\ returned failed\ to\ get\ primaryStorage[%s]\ license\ info,\ because\ expired_time\ is\ null = 无法获取PrimaryStorage[{0}]许可证信息,因为expired_time为空 failed\ to\ parse\ the\ date\ format[%s]\ of\ the\ primaryStorage[%s]\ license\ info = 无法分析PrimaryStorage[{1}]许可证信息的日期格式[{0}] failed\ to\ get\ primaryStorage[%s]\ license\ info,\ because\ the\ returned\ data\ does\ not\ have\ an\ active\ license = 无法获取PrimaryStorage[{0}]许可证信息,因为返回的数据没有活动许可证 +the\ current\ primaryStorage\ %s\ does\ not\ have\ a\ third-party\ token\ set,\ and\ the\ block\ volume\ cannot\ be\ created\ temporarily = 当前PrimaryStorage [{0}] 没有第三方令牌设置,暂时无法创建块设备 +the\ current\ primaryStorage\ %s\ is\ not\ Ceph\ type,\ can\ not\ get\ access\ path = 当前PrimaryStorage [{0}] 不是Ceph类型,无法获取访问路径 +Ceph\ type\ block\ volume\ accessPathId,\ accessPathIqn\ cannot\ be\ null = Ceph 类型块设备访问路径ID,访问路径 IQN 不能为空 +current\ primary\ storage\ type\ not\ support\ block\ volume,\ supporttype\ has\ %s = 当前PrimaryStorage类型不支持块设备,支持类型有 {0} no\ block\ volume\ factory\ found\ for\ vendor\:\ %s = iothread\ need\ qemu\ version\ >\=\ %s,\ but\ %s\ on\ host[%s]. = IOThread需要QEMU版本>={0},但主机[{2}]上需要{1}。 iothread\ need\ libvirt\ version\ >\=\ %s,\ but\ %s\ on\ host[%s]. 
= IOThread需要libvirt版本>={0},但主机[{2}]上需要{1}。 root\ volume[%s]\ cannot\ set\ iothreadpin. = 根卷[{0}]无法设置ioThreadPin。 current\ iothread\ id[%s]\ is\ not\ the\ same\ as\ attached\ vol[%s]\ iothread[%s]. = 当前ioThread ID[{0}]与附加的卷[{1}]ioThread[{2}]不同。 +snapshot\ validation\ is\ unsupported\ for\ volume[uuid\:\ %s].\ Volume\ should\ be\ attached\ to\ vm = 快照验证不支持盘[uuid:{0}]。硬盘应添加到虚拟机上 +snapshot\ validation\ is\ unsupported\ for\ volume[uuid\:\ %s].\ Attached\ vm\ is\ not\ in\ state\ of\ [%s,\ %s] = 快照验证不支持硬盘[uuid:{0}]。相关的虚拟机不在状态[{1}, {2}]中 volume[uuid\:%s]\ can\ not\ found = 未找到卷[uuid:{0}] not\ support\ take\ snapshots\ volume[uuid\:%s,\ uuid\:%s]\ on\ different\ vms[uuid\:%s,\ uuid\:%s] = 不支持在不同的虚拟机[uuid:{2},uuid:{3}]上拍摄卷[UUId:{0},UUId:{1}]的快照 volume[uuid\:%s]\ is\ not\ ready = 卷[uuid:{0}]未就绪 state\ of\ vm[uuid\:\ %s]\ is\ %s,\ not\ allowed\ to\ take\ snapshots = VM[uuid:{0}]的状态为{1},不允许拍摄快照 volume[uuid\:%s]\ is\ not\ data\ volume = 云盘[uuid:{0}]不是数据云盘 +can\ not\ resize\ volume[%s],\ because\ volume\ state\ is\ Disabled = 不能调整硬盘[uuid:{0}]的大小,因为硬盘状态为禁用 At\ least\ one\ of\ vmInstanceUuid\ or\ uuid\ should\ be\ set = 至少应设置VMInstanceuuid或uuid中的一个 no\ volume[uuid\:%s,\ vmInstanceUuid\:%s]\ can\ be\ found = 找不到卷[uuid:{0},VMInstanceuuid:{1}] volume[uuid\:%s]\ is\ not\ root\ volume = 云盘[uuid:{0}]不是根云盘 @@ -2555,6 +2967,9 @@ Minimum\ increase\ size\ should\ be\ larger\ than\ 4MB = 最小扩容量需要 Expansion\ operation\ not\ allowed\ at\ host\ disable = 扩展操作不被允许在不可用的主机上进行 Expansion\ operation\ not\ allowed\ at\ all\ host\ disable = 扩展操作不被允许在所有不可用的主机上进行 shared\ volume[uuid\:\ %s]\ has\ attached\ to\ not\ stopped\ vm\ instances[uuids\:\ %s] = 共享云盘[uuid:{0}]已连接到未停止的虚拟机实例[uuid:{1}] +name\:\ [%s]\ already\ exists,\ block\ volume\ name\ cannot\ be\ duplicated\ on\ type[%s]\ primarystorage = 名称[{0}]已存在,块硬盘名称不能在类型[{1}]主存储上重复 +[protocol]\ parameter\ is\ null,\ type[%s]\ primarystorage\ must\ set\ block\ volume\ protocol = 参数[protocol]为空,类型[{0}]的主存储必须设置块硬盘协议 +current\ [%s]\ primary\ storage\ not\ support\ 
[%s]\ type\ protocol,\ please\ add\ protocol\ to\ storage\ first = 当前[{0}]主存储不支持[{1}]类型协议,请先添加协议 ExponBlockVolume[uuid\:%s]\ not\ found = path\ error = unable\ to\ find\ any\ TemplateConfigs\:\ [templateUuid\:\ %s] = 找不到任何TemplateConfigs:[templateUuid:{0}] @@ -2583,6 +2998,7 @@ the\ usb\ devices[uuid\:%s,\ name\:%s]\ in\ host[uuid\:%s,\ name\:%s]\ is\ occup the\ usb\ device[uuid\:%s]\ has\ already\ been\ attached\ to\ same\ vm[uuid\:%s] = the\ usb\ device[uuid\:%s]\ has\ already\ been\ attached\ to\ another\ vm[uuid\:%s] = USB设备[uuid:{0}]已经被绑定在其他的虚拟机[uuid:{1}] PassThrough\ only\ support\ use\ on\ vm\ running\ host = 直通仅支持在运行主机的虚拟机上使用 +cannot\ attach\ the\ usb\ device[uuid\:%s]\ to\ vm[uuid\:%s],\ possibly\ reasons\ include\:\ the\ device\ is\ not\ enabled\ or\ had\ been\ attached\ to\ a\ vm,\ or\ the\ device\ and\ the\ vm\ are\ not\ on\ same\ host. = 不能将USB设备[uuid:{0}]绑定到虚拟机[uuid:{1}],可能原因包括:设备未启用或已绑定到虚拟机,或设备和虚拟机不在同一主机上。 usb\ is\ already\ bound\ to\ vm[uuid\:%s]\ and\ cannot\ be\ bound\ to\ other\ vm = USB已绑定到VM[uuid:{0}],无法绑定到其他VM vm[%s]\ cannot\ start\ because\ usb\ redirect\ host\ is\ not\ connected = 虚拟机[{0}]无法启动,因为未连接USB重定向主机 cannot\ migrate\ vm[uuid\:%s]\ because\ there\ are\ usb\ devices\ attached\ by\ passthrough = 不能迁移虚拟机[uuid:{0}],因为虚拟机通过直连的方式绑定了USB设备 @@ -2606,6 +3022,7 @@ Failed\ to\ find\ vm[uuid\=%s]\ in\ ESX\ host[uuid\=%s] = Failed\ to\ update\ ESX\ VM[uuid\=%s]\ configuration.\ See\ more\ details\ in\ the\ log. = VM\ [%s]\ not\ found\ in\ vCenter = 在vCenter中找不到虚拟机[{0}] failed\ to\ power\ on\ VM,\ task\ status\:\ %s = 无法启动虚拟机,任务状态:{0} +console\ password\ is\ not\ supported\ by\ vm[uuid\:%s]\ on\ ESXHost[ESXI\ version\:%s] = 虚拟机[uuid:{0}]在ESX主机[ESXI版本:{1}]上不支持控制台密码 vCenter\ login\ name\ expected. 
= vCenter登录名称为空 domainName[%s]\ is\ neither\ an\ IPv4\ address\ nor\ a\ valid\ hostname = 域名[{0}]不是一个IPv4地址或有效的主机名 vCenter\ [domainName\:%s]\ has\ been\ added = vCenter[domainName:{0}]已经被添加 @@ -2630,6 +3047,10 @@ There\ are\ tasks\ running\ on\ the\ VCenter[uuid\:%s],\ please\ try\ again\ lat VCenter[uuid\:%s]\ not\ found\:\ = VCenter[uuid:{0}]不存在 Login\ failed,\ please\ check\ your\ login\ parameters. = 登录失败,请检查用户名密码是否正确 connect\ %s\ failed\:\ %s = 连接{0}失败:{1} +Login\ to\ vCenter\ [%s]\ failed\ with\ user\ [%s],please\ check\ your\ network\ connection\ and\ credential. = 登录到VCenter[{0}]失败,用户名[{1}],请检查网络连接和密码。 +Parse\ response\ failed\ from\ vCenter\ [%s],please\ check\ the\ port\ number[%d]. = 从 vCenter {0} 解析响应失败,请检查端口号[{1}] +SSL\ handshake\ failed\ with\ vCenter\ [%s],because\ insecure\ TLS\ 1.0\ is\ used.\ Manually\ enabled\ TLS\ 1.0\ in\ jdk\ configuration\ if\ needed. = 和 vCenter {0} 的 SSL 握手失败,因为使用了不安全的TLS 1.0。如果需要,请手动在jdk配置中启用TLS 1.0。 +SSL\ handshake\ failed\ with\ vCenter\ [%s],please\ check\ the\ port\ number[%d]. = 和 vCenter {0} 的 SSL 握手失败,请检查端口号[{1}]。 No\ clustered\ compute\ resource\ found = 未找到集群资源 No\ dvSwitch\ or\ qualified\ vSwitch\ found = 未找到可使用的dvSwitch/vSwitch Datastore\ %s\ not\ found\ for\ vCenter\ %s = vCenter{1}中未找到Datastore{0} @@ -2645,6 +3066,7 @@ No\ file\ Datacenter = 无文件数据中心 failed\ to\ get\ VM\ from\ installPath\:\ %s = 在路径{0}下未找到虚拟机 failed\ to\ get\ VM[%s]\ root\ disk\ usage = 获取VM[{0}]根盘使用率失败 failed\ to\ connect\ to\ vCenter\:\ %s\:\ %s = 无法连接到vCenter:{0}:{1} +VCenter[uuid\=%s]\ is\ Disabled.\ You\ can\ only\ perform\ read-only\ operations\ on\ this\ VCenter.\ If\ you\ want\ to\ make\ configuration\ changes\ to\ it,\ you\ need\ to\ update\ config\ by\ UpdateVCenterAction\ {uuid\=%s\ state\=Enabled} = vCenter[{0}]已禁用,只能对它执行只读操作。如果您想对它进行配置更改,请使用UpdateVCenterAction '{'uuid={1} state=Enabled'}'进行更新。 VCenter[uuid\=%s]\ are\ Disabled.\ You\ can\ only\ perform\ read-only\ operations\ on\ these\ VCenter.
= VCenter\ not\ found = 找不到vCenter VCenter[%s]\ is\ not\ in\ operation\ status,\ current\ status\:\ %s = vCenter[{0}]未处于操作状态,当前状态:{1} @@ -2685,17 +3107,23 @@ cannot\ find\ the\ route\ table\ [uuid\:%s] = 找不到路由表[uuid:{0}] # In Module: ministorage Invalid\ resourceUuid\ %s = 资源uuid{0}无效 primary\ storage\ uuid\ cannot\ be\ null. = 主存储uuid不能为空。 +volume[uuid\:%s]\ has\ been\ attached\ some\ VM(s)[uuid\:%s]\ which\ are\ not\ Stopped\ and\ not\ running\ on\ the\ specific\ host. = VM[uuid\:%s]\ are\ not\ Stopped\ and\ not\ running\ on\ the\ specific\ host. = 虚拟机[uuid:{0}]未停止,也未在特定主机上运行。 Fail\ to\ %s,\ because\ host(s)[uuid\:%s]\ are\ not\ enable\ and\ not\ in\ connected\ status. = {0}失败,因为主机[uuid:{1}]未启用且未处于连接状态。 cannot\ find\ proper\ hypervisorType\ for\ primary\ storage[uuid\:%s]\ to\ handle\ image\ format\ or\ volume\ format[%s] = 对主存储[uuid:{0}]来说不能发现合适的管理程序类型来处理镜像格式或云盘格式[{1}] ResourceType\ [%s]\ of\ APIRecoverResourceSplitBrainMsg\ is\ invalid. = ApiRecoverResourceSplitBrainMsg的ResourceType[{0}]无效。 +the\ mini\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = can\ not\ determine\ which\ host = 无法确定是哪个主机 +the\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = no\ connected\ host\ found,\ mini\ storage\ failed = 未找到连接的主机,小型存储失败 +host[uuid\:\ %s]\ of\ mini\ primary\ storage[uuid\:\ %s]\ doesn't\ have\ enough\ capacity[current\:\ %s\ bytes,\ needed\:\ %s] = can\ not\ get\ cluster\ uuid\ of\ volume\ %s = 无法获取卷{0}的集群uuid -no\ connected\ host\ found\ in\ the\ cluster[uuid\:%s] = cluster[uuid:{0}]不存在已连接的主机 no\ backup\ storage\ can\ get\ image[uuid\:%s]\ of\ volume[uuid\:%s] = 没有备份存储可以获取镜像[uuid:{0}](属于卷[uuid:{1}]) image[uuid\:\ %s]\ has\ no\ image\ ref\ with\ backup\ storage[uuid\:\ %s] = 镜像[uuid:{0}]没有备份存储[uuid:{1}]的镜像引用 cannot\ find\ backup\ storage[uuid\:%s] = 找不到镜像服务器[uuid:{0}] +can\ not\ find\ any\ available\ 
host\ to\ resize\ volume[uuid\:\ %s]\ on\ mini\ storage[uuid\:\ %s] = +volume[uuid\:%s]\ replication\ is\ syncing\ data,\ please\ wait\ until\ it\ is\ finished. = +replication\ network\ status\ of\ volume[uuid\:%s]\ run\ into\ StandAlone,\ but\ host\ are\ all\ Connected,\ please\ recover\ it\ first. = Invalid\ path\ string\ %s = 路径字符串{0}无效 Still\ cache\ volume\ exists\ on\ ps[uuid\:%s]\ can\ not\ update\ cache\ volume\ url = PS[uuid:{0}]上仍然存在缓存卷,无法更新缓存卷URL can\ not\ find\ replication\ of\ volume\ %s\ on\ host\ %s = 在主机{1}上找不到卷{0}的复制 @@ -2707,15 +3135,33 @@ can\ not\ allocate\ storage\ sync\ port\ on\ host\ %s\:\ %s = 无法在主机{0} expect\ operate\ on\ hosts[%s]\ but\ only\ host\ %s\ are\ connected\ and\ enabled = 预期在主机[{0}]上运行,但只有主机{1}已连接并启用 mini\ storage[uuid\:%s]\ has\ to\ be\ empty\ before\ restoring\ bits\ from\ zbox.\ please\ clean\ it\ up. = 从ZBox还原位之前,小型存储[uuid:{0}]必须为空。请把它清理干净。 +# In Module: multicast-router +Rendezvous\ Point\ [%s]\ is\ not\ a\ unicast\ address = +group\ address\ [%s]\ is\ not\ a\ multicast\ address = +rp\ address\ pair\ [%s\:\ %s]\ already\ existed\ for\ multicast\ router\ [uuid\:%s] = +rp\ address\ tuple\ [%s\ \:\ %s]\ is\ not\ existed\ for\ multicast\ router\ [uuid\:%s] = +multicastRouter[uuid\:%s]\ has\ not\ been\ attached\ to\ vpc\ router = +multicast\ already\ enabled\ on\ vpc\ router\ uuid[\:%s] = +vpc\ router\ for\ multicast\ router\ [uuid\:%s]\ has\ been\ deleted = +multicast\ router\ [uuid\:%s]\ is\ not\ attached\ to\ Vpc\ Router = +multicast\ router\ [uuid\:%s]\ has\ been\ delete\ during\ enable\ multilcast\ on\ backend = + +# In Module: network-plugin +apply\ gratuitous\ arp\ error,\ because\:%s = +release\ gratuitous\ arp\ error,\ because\:%s = + # In Module: network l2Network[uuid\:%s]\ has\ attached\ to\ cluster[uuid\:%s],\ can't\ attach\ again = 不能再次挂载二层网络[uuid:{0}],因为已经挂载到集群[uuid:{1}]上了 +could\ not\ attach\ l2\ network,\ because\ there\ is\ another\ network\ [uuid\:%s]\ on\ physical\ interface\ [%s]\ with\ different\ vswitch\ 
type = l2Network[uuid\:%s]\ has\ not\ attached\ to\ cluster[uuid\:%s] = 二层网络[uuid:{0}]没有挂载到集群上[uuid:{1}] type[%s]\ should\ be\ attached\ to\ all\ host = +could\ not\ attach\ l2Network[uuid\:%s]\ to\ host[uuid\:%s]\ which\ is\ in\ the\ premaintenance\ or\ maintenance\ state = l2Network[uuid\:%s]\ has\ not\ attached\ to\ cluster\ of\ host[uuid\:%s] = l2Network[uuid\:%s]\ has\ not\ attached\ to\ host[uuid\:%s] = unsupported\ l2Network\ type[%s] = 不支持的网络类型[{0}] unsupported\ vSwitch\ type[%s] = 不支持的vSwitch类型[{0}] l2\ network[type\:%s]\ does\ not\ support\ update\ virtual\ network\ id = +cannot\ update\ virtual\ network\ id\ for\ l2Network[uuid\:%s]\ because\ it\ only\ supports\ an\ L2Network\ that\ is\ exclusively\ attached\ to\ a\ kvm\ cluster = there's\ no\ host\ in\ cluster[uuid\:\ %s],\ but\ hostParams\ is\ set = hostUuid\ can\ not\ be\ null\ in\ HostParam = host[uuid\:\ %s]\ is\ not\ in\ cluster[uuid\:\ %s] = @@ -2730,6 +3176,17 @@ There\ has\ been\ a\ l2Network[uuid\:%s,\ name\:%s]\ attached\ to\ cluster[uuid\ There\ has\ been\ a\ L2VlanNetwork[uuid\:%s,\ name\:%s]\ attached\ to\ cluster[uuid\:%s]\ that\ has\ physical\ interface[%s],\ vlan[%s].\ Failed\ to\ attach\ L2VlanNetwork[uuid\:%s] = 二层网络挂载失败[uuid:{5}]: 二层网络[uuid:{0}, name:{1}]的物理接口[{3}], vlan[{4}]已经挂载到集群[uuid:{2}]上 cannot\ find\ ip\ range\ that\ has\ ip[%s]\ in\ l3Network[uuid\:%s] = IP\ allocator\ strategy[%s]\ failed,\ because\ %s = +could\ not\ delete\ ip\ address,\ because\ it's\ used\ by\ vmnic[uuid\:%s] = +could\ not\ reserve\ ip\ range,\ because\ start\ ip[%s]\ is\ not\ valid\ ip\ address = +could\ not\ reserve\ ip\ range,\ because\ end\ ip[%s]\ is\ not\ valid\ ip\ address = +could\ not\ reserve\ ip\ range,\ because\ end\ ip[%s]\ is\ not\ ipv4\ address = +could\ not\ reserve\ ip\ range,\ because\ end\ ip[%s]\ is\ not\ ipv6\ address = +could\ not\ reserve\ ip\ range,\ because\ end\ ip[%s]\ is\ less\ than\ start\ ip[%s] = +could\ not\ reserve\ ip\ range,\ because\ there\ is\ no\ ipv4\ range = +could\ 
not\ reserve\ ip\ range,\ because\ there\ is\ no\ ipv6\ range = +could\ not\ reserve\ ip\ range,\ because\ reserve\ ip\ is\ not\ in\ ip\ range[%s] = +could\ not\ reserve\ ip\ range,\ because\ new\ range\ [%s\:%s]\ is\ overlapped\ with\ old\ range\ [%s\:%s] = +could\ not\ set\ mtu\ because\ l2\ network[uuid\:%s]\ of\ l3\ network\ [uuid\:%s]\ mtu\ can\ not\ be\ bigger\ than\ the\ novlan\ network = can\ not\ delete\ the\ last\ normal\ ip\ range\ because\ there\ is\ still\ has\ address\ pool = 无法删除最后一个正常IP范围,因为仍有地址池 you\ must\ update\ system\ and\ category\ both = 必须同时更行system属性和category属性 no\ ip\ range\ in\ l3[%s] = 没有IP在三层网络范围中 @@ -2756,6 +3213,7 @@ ipRangeUuids,\ L3NetworkUuids,\ zoneUuids\ must\ have\ at\ least\ one\ be\ none- all\ the\ specified\ L3\ networks\ are\ IPAM\ disabled,\ cannot\ get\ ip\ address\ capacity = unsupported\ l3network\ type[%s] = 不支持的三层网络类型[{0}] %s\ is\ not\ a\ valid\ domain\ name = {0}不是有效的域名 +not\ valid\ combination\ of\ system\ and\ category,only\ %s\ are\ valid = l3\ network\ [uuid\ %s\:\ name\ %s]\ is\ not\ a\ public\ network,\ address\ pool\ range\ can\ not\ be\ added = 三层网络[uuid{0}:名称{1}]不是公用网络,无法添加地址池范围 the\ IP\ range[%s\ ~\ %s]\ contains\ D\ class\ addresses\ which\ are\ for\ multicast = 这个IP段[{0} ~ {1}]包含了D类的组播地址 the\ IP\ range[%s\ ~\ %s]\ contains\ E\ class\ addresses\ which\ are\ reserved = 这个IP段[{0} ~ {1}]包含了E类的保留地址 @@ -2768,6 +3226,7 @@ gateway[%s]\ is\ not\ a\ IPv4\ address = 网关[{0}]不是IPV4的地址 netmask[%s]\ is\ not\ a\ netmask,\ and\ the\ IP\ range\ netmask\ cannot\ be\ 0.0.0.0 = 子网掩码[{0}]不是子网掩码,并且IP段的子网掩码不能是0.0.0.0 start\ ip[%s]\ is\ behind\ end\ ip[%s] = 起始ip[{0}]在尾ip[{1}]后 overlap\ with\ ip\ range[uuid\:%s,\ start\ ip\:%s,\ end\ ip\:\ %s] = 重叠的IP段[uuid:{0}, 起始ip:{1}, 尾ip: {2}] +multiple\ CIDR\ on\ the\ same\ L3\ network\ is\ not\ allowed.\ There\ has\ been\ a\ IP\ range[uuid\:%s,\ CIDR\:%s],\ the\ new\ IP\ range[CIDR\:%s]\ is\ not\ in\ the\ CIDR\ with\ the\ existing\ one = the\ endip[%s]\ is\ not\ in\ the\ subnet\ %s/%s = 
IP段结束地址不在子网{1}/{2}范围内 gateway[%s]\ can\ not\ be\ part\ of\ range[%s,\ %s] = 网关[{0}]不能是IP段[{1}, {2}]的一部分 new\ add\ ip\ range\ gateway\ %s\ is\ different\ from\ old\ gateway\ %s = 新ip段的网关地址{0}和已有ip段的网关地址{1}冲突 @@ -2807,20 +3266,29 @@ there\ has\ been\ a\ nfs\ primary\ storage\ having\ url\ as\ %s\ in\ zone[uuid\: found\ multiple\ CIDR = 返现多个CIDR invalid\ CIDR\:\ %s = 无效CIDR: {0} IP\ address[%s]\ is\ not\ in\ CIDR[%s] = IP地址[{0}]没有在CIDR[{1}]内 +there\ are\ %s\ running\ VMs\ on\ the\ NFS\ primary\ storage,\ please\ stop\ them\ and\ try\ again\:\\n%s\\n = cannot\ find\ usable\ backend = 无法找到可用的NFS主存储后端 no\ usable\ backend\ found = 无法找到可用的NFS主存储后端 +no\ host\ in\ Connected\ status\ to\ which\ nfs\ primary\ storage[uuid\:%s,\ name\:%s]\ attached\ found\ to\ revert\ volume[uuid\:%s]\ to\ snapshot[uuid\:%s,\ name\:%s] = +no\ host\ in\ Connected\ status\ to\ which\ nfs\ primary\ storage[uuid\:%s,\ name\:%s]\ attached\ found\ to\ revert\ volume[uuid\:%s]\ to\ image[uuid\:%s] = vm[uuid\:%s]\ is\ not\ Running,\ Paused\ or\ Stopped,\ current\ state\ is\ %s = 虚拟机[uuid:{0}]不是运行中、已暂停或者已停止状态,现在的状态是{1} primary\ storage[uuid\:%s]\ doesn't\ attach\ to\ any\ cluster = 主存储[uuid:{0}]没有挂载到任何集群 -host\ where\ vm[uuid\:%s]\ locate\ is\ not\ Connected. 
= +the\ NFS\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ usable\ host\ to\ create\ the\ data\ volume[uuid\:%s,\ name\:%s] = +the\ NFS\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = the\ NFS\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ hosts\ in\ attached\ clusters\ to\ perform\ the\ operation = NFS主存储[uuid:{0}, name:{1}]不能找到主机挂载到集群,执行此操作 no\ host\ found\ for\ volume[uuid\:%s] = +the\ NFS\ primary\ storage[uuid\:%s]\ is\ not\ attached\ to\ any\ clusters,\ and\ cannot\ expunge\ the\ root\ volume[uuid\:%s]\ of\ the\ VM[uuid\:%s] = cannot\ find\ a\ connected\ host\ in\ cluster\ which\ ps\ [uuid\:\ %s]\ attached = 在PS[uuid:{0}]连接的集群中找不到已连接的主机 cannot\ find\ a\ Connected\ host\ to\ execute\ command\ for\ nfs\ primary\ storage[uuid\:%s] = 对nfs主存储[uuid:{0}]来说不能发现一个可连接的主机执行命令 +cannot\ find\ a\ host\ which\ has\ Connected\ host-NFS\ connection\ to\ execute\ command\ for\ nfs\ primary\ storage[uuid\:%s] = +unable\ to\ attach\ a\ primary\ storage[uuid\:%s,\ name\:%s]\ to\ cluster[uuid\:%s].\ Kvm\ host\ in\ the\ cluster\ has\ qemu-img\ with\ version[%s];\ but\ the\ primary\ storage\ has\ attached\ to\ another\ cluster\ that\ has\ kvm\ host\ which\ has\ qemu-img\ with\ version[%s].\ qemu-img\ version\ greater\ than\ %s\ is\ incompatible\ with\ versions\ less\ than\ %s,\ this\ will\ causes\ volume\ snapshot\ operation\ to\ fail.\ Please\ avoid\ attaching\ a\ primary\ storage\ to\ clusters\ that\ have\ different\ Linux\ distributions,\ in\ order\ to\ prevent\ qemu-img\ version\ mismatch = unable\ to\ create\ folder[installUrl\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = 无法在KVM主机[uuid:{1},IP:{2}]上创建文件夹[InstallUrl:{0}],原因是{3} no\ host\ in\ is\ Connected\ or\ primary\ storage[uuid\:%s]\ attach\ no\ cluster = 没有主机处于Connected状态,或主存储[uuid:{0}]没有挂载到任何集群 +failed\ to\ ping\ nfs\ primary\ storage[uuid\:%s]\ from\ host[uuid\:%s],because\ %s.\ disconnect\ this\ 
host-ps\ connection = The\ chosen\ host[uuid\:%s]\ to\ perform\ storage\ migration\ is\ lost = 准备存储迁移的主机[uuid:{0}]失联了 failed\ to\ check\ existence\ of\ %s\ on\ nfs\ primary\ storage[uuid\:%s],\ %s = 检查nfs主存储中是否存在{0}失败 unable\ to\ create\ empty\ volume[uuid\:%s,\ \ name\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ because\ %s = 不能在主机[uuid:{2}, ip:{3}]上创建空云盘[uuid:{0}, name:{1}],因为{4} +failed\ to\ delete\ bits[%s]\ on\ nfs\ primary\ storage[uuid\:%s],\ %s,\ will\ clean\ up\ installPath,\ pinv.getUuid(),\ rsp.getError() = failed\ to\ revert\ volume[uuid\:%s]\ to\ snapshot[uuid\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ %s = 回滚在主机[uuid:{2}, ip:{3}]上的云盘[uuid:{0}]到快照[uuid:{1}]的状态失败,{4} failed\ to\ revert\ volume[uuid\:%s]\ to\ image[uuid\:%s]\ on\ kvm\ host[uuid\:%s,\ ip\:%s],\ %s = 回滚在主机[uuid:{2}, ip:{3}]上的云盘[uuid:{0}]到镜像[uuid:{1}],{4} fails\ to\ create\ root\ volume[uuid\:%s]\ from\ cached\ image[path\:%s]\ because\ %s = 从镜像[path:{1}]创建云盘失败,因为{2} @@ -2853,6 +3321,7 @@ Export\ vm\ requires\ an\ ImageStore\ backup\ storage,\ but\ given\ backupStorag Not\ found\ the\ vm\ to\ be\ exported\ with\ the\ uuid\:\ %s = 未找到uuid为{0}的要导出的VM Only\ vm\ in\ state\:\ %s\ can\ be\ exported. = 只能导出状态为{0}的云主机。 failed\ to\ parse\ jsonCreateVmParam\ in\ APICreateVmInstanceFromOvfMsg = 无法分析APICreateVmInstanceFromOvFMsg中的JsonCreateVmParam +backup\ storage[uuid\:\ %s]\ does\ not\ have\ enough\ available\ capacity\ for\ exporting\ vm[uuid\:\ %s],\ required\ capacity\ is\:\ %d = failed\ to\ parse\ OVF\ XML\ string = 无法解析 OVF XML ova\ package[uuid\:\ %s]\ not\ found. = 未找到OVA程序包[uuid:{0}]。 Failed\ to\ read\ ovf\ file. 
= 无法读取OVF文件。 @@ -2863,6 +3332,7 @@ cancel\ create\ OVF\ VM\ process\ before\ creating\ VM = failed\ to\ create\ VM\ from\ OVF\ because\ the\ root\ disk\ of\ the\ VM\ cannot\ be\ found = 无法从OVF创建VM,因为找不到VM的根磁盘 message\ can\ not\ be\ null = ovfInfo\ can\ not\ be\ null = +failed\ to\ create\ ovf\ bundle\:\ Neither\ the\ OVF\ file\ nor\ the\ custom\ API\ has\ set\ the\ size\ of\ the\ root\ disk,\ so\ unable\ to\ allocate\ root\ disk.\ You\ should\ set\ root\ disk\ size\ in\ CreateVmInstanceFromOvfAction.jsonCreateVmParam.rootDiskSize = failed\ to\ create\ ovf\ bundle = 无法创建 OVF 程序包 failed\ to\ validate\ ovf\ bundle = ovfId\ is\ null = @@ -2903,6 +3373,8 @@ vip\ port\ range[vipStartPort\:%s,\ vipEndPort\:%s]\ overlaps\ with\ rule[uuid\: the\ VM[name\:%s\ uuid\:%s]\ already\ has\ port\ forwarding\ rules\ that\ have\ different\ VIPs\ than\ the\ one[uuid\:%s] = 云主机[name:{0} uuid:{1}]已经有端口转发规则,且与[uuid:{2}]有不同的VIPs the\ VmNic[uuid\:%s]\ already\ has\ port\ forwarding\ rules\ that\ have\ different\ VIPs\ than\ the\ one[uuid\:%s] = vmNic\ uuid[%s]\ is\ not\ allowed\ add\ portForwarding\ with\ allowedCidr\ rule,\ because\ vmNic\ exist\ eip = 不允许vmnic uuid[{0}]使用AllowedCIDR规则添加PortForwarding,因为vmnic存在EIP +could\ not\ attach\ port\ forwarding\ rule\ with\ allowedCidr,\ because\ vmNic[uuid\:%s]\ already\ has\ rules\ that\ overlap\ the\ target\ private\ port\ ranges[%s,\ %s]\ and\ have\ the\ same\ protocol\ type[%s] = 不允许使用 AllowedCIDR 规则添加端口转发,因为 vmnic[uuid:{0}] 已经有重叠的端口转发规则[私网端口范围:{1}, {2}],且协议类型相同[{3}] +could\ not\ attach\ port\ forwarding\ rule,\ because\ vmNic[uuid\:%s]\ already\ has\ a\ rule\ that\ overlaps\ the\ target\ private\ port\ ranges[%s,\ %s],\ has\ the\ same\ protocol\ type[%s]\ and\ has\ AllowedCidr = 不允许添加端口转发,因为 vmnic[uuid:{0}] 已经有重叠的端口转发规则[私网端口范围:{1}, {2}], 且协议类型相同[{3}] unable\ to\ create\ port\ forwarding\ rule,\ extension[%s]\ refused\ it\ because\ %s = port\ forwarding\ rule\ [uuid\:%s]\ is\ deleted = 端口转发规则[uuid:{0}]已删除 @@ -2927,6 +3399,7 @@ cannot\ find\ 
internal\ id\ of\ the\ session[uuid\:%s],\ are\ there\ too\ many\ # In Module: portal no\ service\ configuration\ file\ declares\ message\:\ %s = management\ node[uuid\:%s]\ is\ not\ ready\ yet = +resourceUuid[%s]\ is\ not\ a\ valid\ uuid.\ A\ valid\ uuid\ is\ a\ UUID(v4\ recommended)\ with\ '-'\ stripped.\ see\ http\://en.wikipedia.org/wiki/Universally_unique_identifier\ for\ format\ of\ UUID,\ the\ regular\ expression\ uses\ to\ validate\ a\ UUID\ is\ '[0-9a-f]{8}[0-9a-f]{4}[1-5][0-9a-f]{3}[89ab][0-9a-f]{3}[0-9a-f]{12}' = invalid\ value[%s]\ of\ field[%s] = invalid\ field[%s]\ for\ %s,\ resource[uuid\:%s,\ type\:%s]\ not\ found = invalid\ field[%s]\ for\ %s,\ resource[uuids\:%s,\ type\:%s]\ not\ found = @@ -2935,12 +3408,14 @@ invalid\ value\ %s\ of\ field[%s] = # In Module: resourceconfig resources\ has\ inconsistent\ resourceTypes.\ Details\:\ %s = 资源具有不一致的资源类型。详细信息:{0} cannot\ find\ resource[uuid\:\ %s] = 找不到资源[uuid:{0}] +ResourceConfig\ [category\:%s,\ name\:%s]\ cannot\ bind\ to\ resourceType\:\ %s = 资源配置[类别:{0},名称:{1}]无法绑定资源类型:{2} no\ global\ config[category\:%s,\ name\:%s]\ found = 找不到全局配置[类别:{0},名称:{1}] global\ config[category\:%s,\ name\:%s]\ cannot\ bind\ resource = 全局配置[类别:{0},名称:{1}]无法绑定资源 account\ has\ no\ access\ to\ the\ resource[uuid\:\ %s] = 账号没有访问资源[uuid:{0}]的权限 # In Module: rest [%s]\ field\ is\ excepted\ an\ int\ or\ long,\ but\ was\ [%s]. = [{0}] 属性期望是一个整数,但是得到的是 [{1}] +Invalid\ value\ for\ boolean\ field\ [%s],\ [%s]\ is\ not\ a\ valid\ boolean\ string[true,\ false]. 
= [{0}] 属性值无效,[{1}] 不是一个有效的布尔值字符串[true, false] # In Module: routeProtocol [%s]\ is\ not\ formatted\ as\ IPv4\ address = [{0}]的格式不是IPv4地址 @@ -2987,6 +3462,7 @@ failed\ to\ check\ physical\ interface\ for\ HardwareVxlanPool[uuid\:%s,\ name\: condition\ name[%s]\ is\ invalid,\ no\ such\ field\ on\ inventory\ class[%s] = 条件名[{0}]非法,在清单类里面没有这个域 condition\ name[%s]\ is\ invalid,\ field[%s]\ of\ inventory[%s]\ is\ annotated\ as\ @Unqueryable\ field = 条件名[{0}]非法,清单[{2}]的值[{1}]不是被标记为@Unqueryable的值 entity\ meta\ class[%s]\ has\ no\ field[%s] = 实体元类[{0}]中没有值[{1}] +field[%s]\ is\ not\ a\ primitive\ of\ the\ inventory\ %s;\ you\ cannot\ specify\ it\ in\ the\ parameter\ 'fields';valid\ fields\ are\ %s = 值[{0}]不是清单[{1}]的基元类型,不能在参数''fields''中指定,有效的值为{2} filterName\ must\ be\ formatted\ as\ [filterType\:condition(s)] = FilterName的格式必须为[filterType:条件] 'value'\ of\ query\ condition\ %s\ cannot\ be\ null = 查询条件中{0}的''值''不能为空 search\ module\ disabled = @@ -3112,6 +3588,7 @@ security\ group[uuid\:%s]\ is\ not\ owned\ by\ account[uuid\:%s]\ or\ admin = # In Module: sftpBackupStorage SftpBackupStorage\ doesn't\ support\ scheme[%s]\ in\ url[%s] = Sftp镜像服务器不支持在url[{1}]里包含scheme[{0}] fail\ to\ cancel\ download\ image,\ because\ %s = 无法取消下载镜像,因为{0} +the\ uuid\ of\ sftpBackupStorage\ agent\ changed[expected\:%s,\ actual\:%s],\ it's\ most\ likely\ the\ agent\ was\ manually\ restarted.\ Issue\ a\ reconnect\ to\ sync\ the\ status = sftp\ backup\ storage\ do\ not\ support\ calculate\ image\ hash = Please\ stop\ the\ vm\ before\ create\ volume\ template\ to\ sftp\ backup\ storage\ %s = 请在创建SFTP备份存储{0}的卷模板之前停止虚拟机 duplicate\ backup\ storage.\ There\ has\ been\ a\ sftp\ backup\ storage[hostname\:%s]\ existing = 重复的镜像服务器。已经存在一个镜像服务器[主机名: {0}]
KVM\ hosts\ in\ the\ clusters\ attached\ with\ the\ shared\ mount\ point\ storage[uuid\:%s]\ are\ disconnected = vm[uuid\:%s]\ is\ not\ Running,\ Paused\ or\ Stopped,\ current\ state[%s] = 云主机[uuid:{0}]不是运行中、已暂停或者已停止状态,现在的状态是[{1}] hosts[uuid\:%s]\ have\ the\ same\ mount\ path,\ but\ actually\ mount\ different\ storage. = 物理机[uuid:{0}]有相同挂载路径,但是实际上挂载在不同的存储上 host[uuid\:%s]\ might\ mount\ storage\ which\ is\ different\ from\ SMP[uuid\:%s],\ please\ check\ it = 物理机[uuid:{0}]可能装载与SMP[uuid:{1}]不同的存储,请检查 +the\ shared\ mount\ point\ primary\ storage[uuid\:%s,\ name\:%s]\ cannot\ find\ any\ available\ host\ in\ attached\ clusters\ for\ instantiating\ the\ volume = +the\ SMP\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = not\ supported\ operation = 不支持的操作 +the\ SMP\ primary\ storage[uuid\:%s]\ is\ not\ attached\ to\ any\ clusters,\ and\ cannot\ expunge\ the\ root\ volume[uuid\:%s]\ of\ the\ VM[uuid\:%s] = cannot\ find\ a\ Connected\ host\ to\ execute\ command\ for\ smp\ primary\ storage[uuid\:%s] = 找不到一个已连接状态的物理机为SMP主存储[uuid:{0}]执行命令 cannot\ find\ a\ host\ which\ has\ Connected\ host-SMP\ connection\ to\ execute\ command\ for\ smp\ primary\ storage[uuid\:%s] = 找不到一个和SMP主存储[uuid:{0}]处于已连接状态的物理机为其执行命令 # In Module: sharedblock sanlock\ says\ host\ %s\ is\ offline\ on\ %s = SANlock指出主机{0}在{1}上处于脱机状态 +can\ not\ find\ volume\ need\ to\ operate\ shared\ block\ group\ primary\ storage = +KVM\ host\ which\ volume[uuid%s]\ attached\ disconnected\ with\ the\ shared\ block\ group\ storage[uuid\:%s] = +cannot\ find\ any\ connected\ host\ to\ perform\ the\ operation,\ it\ seems\ all\ KVM\ hosts\ in\ the\ clusters\ attached\ with\ the\ shared\ block\ group\ storage[uuid\:%s]\ are\ disconnected = templated\ vm[uuid\:\ %s]\ cannot\ be\ create\ from\ vm\ with\ scsi\ lun[uuids\:\ %s] = primary\ storage[uuid\:\ %s]\ has\ attached\ the\ scsi\ lun[wwid\:\ %s] = 主存储[uuid:{0}]已连接SCSI Lun[WWID:{1}] +the\ 
vm[uuid\:\ %s]\ does\ not\ has\ additional\ qmp\ socket,\ it\ may\ because\ of\ the\ vm\ start\ without\ the\ global\ config[vm.additionalQmp]\ enabled,\ please\ make\ sure\ it\ enabled\ and\ reboot\ vm\ in\ zstack = must\ specify\ at\ least\ one\ disk\ when\ add\ shared\ block\ group\ primary\ storage = 添加共享块存储时必须指定至少一个云盘 +shared\ block[uuid\:%s,\ diskUuid\:%s,\ description\:%s]\ already\ added\ to\ shared\ block\ group[uuid\:%s]in\ new\ shared\ block\ group = shared\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage\ can\ not\ resize = SharedBlock主存储上的共享云盘[uuid: {0}]暂时不支持扩容 shared\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage\ has\ attached\ to\ not\ stopped\ vm\ instances[uuids\:\ %s] = SharedBlock存储上的共享云盘[uuid: {0}]加载到了不是停止状态的虚拟机[uuid: {1}],请先从虚拟机卸载或将虚拟机停止 can\ not\ find\ the\ preparation\ of\ the\ volume[%s] = +use\ the\ thick\ provisioning\ volume\ as\ the\ cache\ volume.\ the\ preparation\ of\ the\ volume[%s]\ is\ %s = the\ scsi\ lun[uuid\:\ %s,\ wwid\:\ %s]\ is\ already\ attach\ to\ primary\ storage[uuid\:\ %s] = can\ not\ found\ any\ cluster\ attached\ on\ shared\ block\ group\ primary\ storage[uuid\:\ %S] = 找不到任何挂载了共享块主存储[uuid: %S] +the\ shared\ block\ group\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = failed\ to\ connect\ to\ all\ clusters%s = +the\ SharedBlock\ primary\ storage[uuid\:%s,\ name\:%s]\ has\ not\ attached\ to\ any\ clusters,\ or\ no\ hosts\ in\ the\ attached\ clusters\ are\ connected = cannot\ find\ volume\ snapshot[uuid\:%s] = empty\ migrateVolumeStructs\ in\ migrateVolumesBetweenSharedBlockGroupPrimaryStorageMsg! = MigrateEvoluesBetweenSharedBlockGroupPrimaryStorageMsg中的MigrateEvolumeStructs为空! no\ volume\ in\ migrateVolumeStructs\ in\ migrateVolumesBetweenSharedBlockGroupPrimaryStorageMsg! = MigrateEvolumesBetweenSharedBlockGroupPrimaryStorageMsg中的MigrateEvolmeStructs中没有卷! 
cannot\ find\ an\ available\ host\ to\ execute\ command\ for\ shared\ block\ group\ primary\ storage[uuid\:%s] = 找不到加载了共享块存储的[uuid:{0}]可用的主机 +cannot\ find\ a\ host\ which\ has\ connected\ shared\ block\ to\ execute\ command\ for\ shared\ block\ group\ primary\ storage[uuid\:%s] = +the\ host[uuid\:\ %s]\ running\ on\ is\ not\ available\ to\ resize\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage[uuid\:\ %s] = primary\ storage[uuid\:%s]\ not\ found = 找不到主存储[uuid:{0}] volume[uuid\:%s]\ not\ found = 未找到卷[uuid:{0}] +can\ not\ find\ qualified\ kvm\ host\ for\ shared\ block\ group\ primary\ storage[uuid\:\ %s] = shared\ volume\ not\ support\ thin\ provisioning = 共享云盘不支持精简配置 not\ support\ online\ merge\ snapshot\ for\ shareable\ volume[uuid\:\ %s]\ on\ sharedblock = 不支持对共享块存储上的共享云盘[uuid: {1}]做在线合并快照,请关机或卸载后操作 the\ image[uuid\:\ %s,\ name\:%s]\ is\ not\ found\ on\ any\ backup\ storage = 在任何备份存储上都找不到镜像[uuid:{0},名称:{1}] @@ -3153,17 +3645,26 @@ not\ support\ convert\ thin\ volume\ to\ thick\ volume\ yet = 尚不支持将精 expected\ status\ is\ %s\ and\ current\ status = 预期状态为{0},当前状态为 VM[uuid\:%s]\ has\ multiple\ ISOs\ from\ different\ primary\ storage\:\ %s = VM[uuid:{0}]挂载了来自不同主存储:{1}的ISO QCow2\ shared\ volume[uuid\:%s]\ is\ not\ supported = 不支持QCOW2共享云盘[uuid:{0}] +can\ not\ find\ any\ available\ host\ to\ take\ snapshot\ for\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage[uuid\:\ %s] = only\ support\ full = 共享块存储目前只支持全量快照 +can\ not\ find\ any\ available\ host\ to\ migrate\ volume[uuid\:\ %s]\ between\ shared\ block\ group\ primary\ storage[uuid\:\ %s]\ and\ [uuid\:\ %s] = +can\ not\ find\ any\ available\ host\ to\ migrate\ for\ volume[uuid\:\ %s]\ on\ shared\ block\ group\ primary\ storage[uuid\:\ %s]\ and\ [uuid\:\ %s] = +can\ not\ find\ hosts\ both\ connect\ to\ primary\ storage[uuid\:\ %s]\ and\ primary\ storage[uuid\:\ %s] = +cannot\ find\ any\ connected\ host\ to\ perform\ the\ operation,\ it\ seems\ all\ KVM\ hosts\ attached\ with\ the\ shared\ 
block\ group\ storage[uuid\:%s]\ are\ disconnected = cannot\ shrink\ snapshot\ %s,\ because\ volume\ %s\ not\ ready = 无法收缩快照{0},因为卷{1}未就绪 cannot\ shrink\ snapshot\ %s,\ beacuse\ vm\ %s\ not\ in\ Running/Stopped\ state = 无法收缩快照{0},因为VM{1}未处于正在运行/已停止状态 get\ null\ install\ path\ in\ snapshot\ for\ vm\ %s = 在虚拟机{0}的快照中获取Null安装路径 active\ children\ snapshot\ failed,\ because\ %s = active\ installPath\ %s\ failed,\ because\ %s = +deactive\ installPath\ failed,\ because\ %s = invalid\ thinProvisioningInitializeSize\ tag = invalid\ thinProvisioningInitializeSize\ tag,\ it\ must\ be\ greater\ than\ or\ equal\ to\ %s = invalid\ thinProvisioningInitializeSize,\ it\ is\ not\ a\ number = invalid\ thinProvisioningInitializeSize\ is\ larger\ than\ %d = migrate\ volume\ without\ snapshot\ on\ shared\ block\ is\ not\ support\ to\ cancel. = 不支持取消在共享数据块上迁移不带快照的卷。 +cannot\ find\ the\ image[uuid\:%s]\ in\ any\ connected\ backup\ storage\ attached\ to\ the\ zone[uuid\:%s].\ check\ below\:\\n1.\ whether\ the\ backup\ storage\ is\ attached\ to\ the\ zone[uuid\:%s]\\n2.\ whether\ the\ backup\ storage\ is\ in\ connected\ status;\ try\ to\ reconnect\ it\ if\ not = +there\ are\ not\ enough\ capacity\ for\ image[uuid\:\ %s]\ download\ while\ volume[uuid\:\ %s]\ storage\ migration,\ required\ capacity\:\ %s,\ current\ available\ physical\ capacity\:\ %s = +there\ are\ not\ enough\ capacity\ for\ volume[uuid\:\ %s]\ storage\ migration,\ required\ capacity\:\ %s,\ current\ available\ physical\ capacity\:\ %s = data\ on\ source\ ps[uuid\:\ %s]\ has\ been\ discarded,\ not\ support\ rollback = 源主存储[uuid:{0}]上的数据已经被清理,无法回滚 # In Module: simulator2 @@ -3175,14 +3676,43 @@ set\ to\ disconnected = 设置为断开连接 on\ purpose = # In Module: slb +could\ not\ create\ slb\ instance\ because\ there\ is\ no\ load\ balancer\ slb\ group\ [uuid\:%s] = +could\ not\ create\ slb\ instance\ because\ there\ is\ no\ slb\ offering\ configured\ for\ slb\ group\ [uuid\:%s] = +could\ not\ create\ slb\ instance\ because\ image\ 
uuid\ of\ slb\ offering\ [uuid\:%s]\ is\ null = +could\ not\ create\ slb\ instance\ because\ image\ [uuid\:%s]\ is\ deleted = could\ not\ create\ slb\ group\ because\ invalid\ front\ l3\ network\ type\ %s = 无法创建SLB组,因为前端三层网络类型{0}无效 could\ not\ create\ slb\ group,\ because\ front\ network\ doesn't\ support\ ipv6\ yet = 无法创建SLB组,因为前端网络尚不支持IPv6 +could\ not\ execute\ the\ api\ operation.\ front\ network\ [uuid\:%s]\ cidr\ [%s]\ is\ overlapped\ with\ management\ l3\ network[uuid\:%s]\ cidr\ [%s] = could\ not\ create\ slb\ group,\ because\ backend\ network\ doesn't\ support\ ipv6\ yet = 无法创建SLB组,因为后端网络尚不支持IPv6 +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ cidr\ [%s]\ is\ overlapped\ with\ frond\ l3\ network[uuid\:%s]\ cidr\ [%s] = +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ cidr\ [%s]\ is\ overlapped\ with\ management\ l3\ network[uuid\:%s]\ cidr\ [%s] = +could\ not\ execute\ the\ api\ operation.\ frontend\ network\ [uuid\:%s]\ is\ not\ connected\ vpc\ router = +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ must\ be\ vpc\ network\ because\ frond\ l3\ network\ is\ vpc\ network = +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ is\ not\ connected\ vpc\ router = +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ is\ connected\ vpc\ router\ [uuid\:%s]\ while\ front\ network\ is\ connected\ to\ vpc\ router[uuid\:%s] = +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ must\ be\ private\ flat\ network\ because\ frond\ l3\ network\ is\ private\ flat\ network = +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ is\ connected\ vpc\ router\ [uuid\:%s]\ which\ is\ not\ connect\ to\ front\ network[uuid\:%s] = +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ must\ be\ vpc\ network\ because\ other\ backend\ network\ is\ vpc\ network = +could\ not\ execute\ the\ api\ operation.\ 
backend\ network\ [uuid\:%s]\ is\ connected\ vpc\ router\ [uuid\:%s]\ while\ other\ backend\ network\ is\ connected\ to\ vpc\ router[uuid\:%s] = +could\ not\ execute\ the\ api\ operation.\ backend\ network\ [uuid\:%s]\ can\ not\ be\ vpc\ network\ because\ other\ backend\ network\ is\ not\ vpc\ network = could\ not\ create\ slb\ group\ because\ invalid\ deploy\ type\ %s = 无法创建SLB组,因为部署类型{0}无效 could\ not\ create\ slb\ group\ because\ invalid\ backend\ type\ %s = 无法创建SLB组,因为后端类型{0}无效 +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ipv4\ address[%s]\ format\ error = +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ipv4\ netmask[%s]\ format\ error = +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ip\ address\ and\ netmask\ must\ be\ set\ in\ systemTag = +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ipv6\ address[%s]\ format\ error = +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ipv6\ prefix[%s]\ format\ error = +can\ not\ attach\ l3\ network\ [uuid\:%s]\ to\ SLB\ instance,\ because\ ip\ address\ and\ prefix\ must\ be\ set\ in\ systemTag = can\ not\ detach\ front\ end\ l3\ network\ [uuid\:%s]\ from\ SLB\ instance = 无法从SLB实例分离前端三层网络[uuid:{0}] can\ not\ detach\ management\ l3\ network\ [uuid\:%s]\ from\ SLB\ instance = 无法从SLB实例分离管理三层网络[uuid:{0}] +can\ not\ detach\ nic\ [uuid\:%s]\ from\ SLB\ instance,\ because\ it\ is\ the\ last\ backend\ l3\ network\ nic = +can\ not\ create\ load\ balancer\ because\ vip\ [uuid\:%s]\ has\ attached\ other\ network\ service\ [%s] = +can\ not\ create\ load\ balancer\ because\ vip\ [uuid\:%s]\ has\ attached\ to\ vpc\ router\ [%s] = can\ not\ create\ load\ balancer\ because\ invalid\ slb\ group\ [uuid\:%s] = 无法创建负载平衡器,因为SLB组[uuid:{0}]无效 +could\ not\ add\ vmnic\ to\ load\ balancer\ server\ \ group\ because\ l3\ network\ [uuid\:%s]\ is\ connected\ any\ vpc\ router = +could\ not\ add\ vmnic\ to\ load\ balancer\ server\ \ 
group\ because\ l3\ network[uuid\:%s]\ is\ connected\ to\ different\ vpc\ router = +could\ not\ add\ vmnic\ to\ load\ balancer\ server\ \ group\ because\ l3\ network\ is\ not\ connected\ slb\ instance = +can\ not\ find\ nic\ of\ slb\ instance\ [uuid\:%s]\ which\ is\ attached\ to\ slb\ group\ front\ l3\ network\ [uuid\:%s] = failed\ to\ create\ vip%s\ on\ virtual\ router[uuid\:%s],\ because\ %s = 未能在云路由[uuid:{1}]上创建VIP{0},因为{2} can\ not\ find\ slb\ vm\ instance = 找不到SLB虚拟机实例 @@ -3191,6 +3721,12 @@ Failed\ to\ create\ SNMP\ agent,\ because\ snmp\ agent\ already\ created. = Failed\ to\ stop\ SNMP\ agent,\ please\ create\ a\ snmp\ agent\ first. = Failed\ to\ update\ SNMP\ agent,\ please\ create\ a\ snmp\ agent\ first. = Failed\ to\ start\ SNMP\ agent,\ please\ create\ a\ snmp\ agent\ first. = +Failed\ to\ %s\ SNMP\ agent,\ because\ readCommunity\ can\ not\ be\ empty\ when\ version\ is\ v2c = 对 SNMP 的 {0} 操作失败,当版本为 v2c 时 readCommunity 不能为空 +Failed\ to\ %s\ SNMP\ agent,\ because\ userName\ can\ not\ be\ empty\ when\ version\ is\ v3 = 对 SNMP 的 {0} 操作失败,当版本为 v3 时 userName 不能为空 +Failed\ to\ %s\ SNMP\ agent,\ auth\ algorithm\ can\ not\ be\ null\ when\ password\ is\ not\ null. = 对 SNMP 的 {0} 操作失败,当密码不为空时,认证算法不能为空 +Failed\ to\ %s\ SNMP\ agent,\ because\ auth\ password\ can\ not\ be\ empty. = 对 SNMP 的 {0} 操作失败,密码不能为空 +Failed\ to\ %s\ SNMP\ agent,\ because\ setting\ data\ encryption\ requires\ setting\ user\ verification\ first. = 对 SNMP 的 {0} 操作失败,当设置数据加密时,需要先设置用户验证 +Failed\ to\ %s\ SNMP\ agent,\ because\ privacy\ password\ can\ not\ be\ empty. = 对 SNMP 的 {0} 操作失败,privacy password 不能为空 can't\ get\ SnmpAgentImpl\ instance,\ due\ to\ no\ SnmpAgentVO\ exist. = more\ than\ one\ SnmpAgentVO\ exist. 
= failed\ to\ start\ snmp\ agent[%s]\ on\ port\ %s,\ due\ to\ %s = @@ -3198,6 +3734,11 @@ snmp[uuid\:%s]\ has\ not\ been\ created = failed\ to\ change\ snmp\ agent\ port\ from\ %s\ to\ %s,\ duet\ to\ %s = failed\ to\ close\ snmp\ agent\ session[%s]\ on\ port\ %s,\ due\ to\ %s = +# In Module: sns-aliyun-sms +Aliyun\ account[uuid\:%s]\ not\ exists = +invalid\ phone\ number[%s],\ sms\ number\ is\ like\ +86-18654321234 = +Aliyun\ sms\ event\ text\ template\ not\ found. = + # In Module: sns uuid\ [%s]\ already\ exists = smtpServer\ cannot\ null = @@ -3210,7 +3751,6 @@ can\ not\ create\ snmp\ platform\ with\ same\ address[%s\:%s] = can\ not\ add\ same\ email\ address\ to\ endpoint[uuid\:%s] = 无法将同一电子邮件地址添加到终结点[uuid:{0}] cannot\ update\ email\ address\ to\ %s,\ which\ is\ already\ exists\ in\ endpoint[uuid\:%s] = 无法将电子邮件地址更新为{0},该地址已存在于终结点[uuid:{1}]中 phone\ number\ [%s]\ already\ exists = 电话号码[{0}]已存在 -invalid\ phone\ number[%s],\ sms\ number\ is\ like\ +86-18654321234 = 电话号码[{0}]无效,短信号码类似于+86-18654321234 invalid\ url[%s] = 无效的url[{0}] [%s]\ is\ not\ a\ legal\ ip = [{0}]不是合法的IP invalid\ phone\ number[%s],\ the\ DingDing\ phone\ number\ is\ like\ +86-12388889999 = 无效的手机号码[{0}], 钉钉手机号码格式应当为 +86-12388889999 @@ -3240,6 +3780,24 @@ only\ HTTP\ endpoint\ can\ subscribe\ API\ topic,\ the\ endpoint[type\:%s]\ is\ API\ topic\ cannot\ be\ deleted = API通知主题无法被删除 system\ alarm\ topic\ cannot\ be\ deleted = 系统警报通知主题不能被删除 +# In Module: software-package-plugin +shell\ command\ failed = +path\ cannot\ be\ null\ or\ empty = +invalid\ path\:\ %s,\ %s = +invalid\ path\:\ %s = +filesystem\ stat\ failed = +invalid\ df\ output = +invalid\ number\ format = +Invalid\ install\ path\ detected\:\ %s.\ Paths\ must\ only\ contain\ letters,\ numbers,\ underscores,\ dashes,\ colons,\ spaces,\ dots\ and\ slashes.\ Path\ traversal\ sequences\ (..\ and\ //)\ are\ not\ allowed.\ Path\ must\ be\ absolute.\ Path\ must\ not\ be\ root\ path = 创建软件包时,路径包含非法字符。路径只能包含字母、数字、下划线、中划线、冒号、空格、点、斜杠。路径不能包含 .. 
或 //。路径必须为绝对路径。路径不能为根路径 +software\ package\ [%s]\ cannot\ be\ installed\ in\ current\ state\ [%s].\ Allowed\ states\:\ %s\ or\ %s. = +software\ package\ [%s]\ cannot\ be\ uninstalled\ in\ current\ state\ [%s].\ Allowed\ states\:\ %s. = +failed\ to\ identify\ software\ package\ type.\ package\:\ %s,\ installPath\:\ %s,\ unzipPath\:\ %s.\ please\ verify\ the\ package\ format\ is\ correct\ and\ a\ corresponding\ extension\ point\ is\ registered. = 创建软件包时,无法识别软件包的类型。软件包:{0}, 安装路径:{1}, 解压路径:{2}。请确认软件包的格式正确,并且已注册相应的扩展点。 +a\ non-management\ node\ installation\ of\ the\ software\ package\ is\ detected\ in\ this\ environment.\ to\ proceed\ with\ a\ new\ management\ node-based\ installation,\ please\ first\:\\n1.\ uninstall\ the\ existing\ manually\ installed\ components\\n2.\ ensure\ the\ environment\ is\ completely\ clean\\nnote\:\ this\ installation\ must\ be\ performed\ exclusively\ through\ the\ management\ node = 创建软件包时,检测到非管理节点安装。要继续使用管理节点进行新安装,请首先:\\n1. 卸载现有的手动安装组件\\n2. 确保环境是干净的\\n注意:此安装必须仅通过管理节点进行 +no\ extension\ point\ found\ for\ software\ package\ type\:\ %s = +software\ package\ [uuid\:%s]\ not\ found = +failed\ to\ get\ software\ package\ type = +upload\ software\ package\ session\ expired = + # In Module: sshKeyPair The\ sshKeyPair\ already\ upload = The\ sshKeyPair[uuid\:%s]\ was\ in\ using. 
= @@ -3253,6 +3811,69 @@ Cannot\ generate\ sshKeyPair,\ error\:\ %s = failed\ to\ load\ the\ public\ key\:\ %s,\ err\:\ %s = ssh\ key\ pair[uuid\:%s]\ can\ not\ associated\ to\ vm[uuid\:%s]\ due\ to\ the\ key\ not\ found = +# In Module: sso-plugin +SSO\ client\ type[%s]\ not\ support\ for\ DeleteSSOClientAction = +casClient[uuid\:%s,\ name\:%s]\ has\ been\ deleted = +unable\ to\ find\ CAS\ client[uuid\=%s] = +duplicate\ CAS\ server[serverName\=%s] = +url\ is\ error,\ clientUuid\ is\ miss = +\ missing\ cas\ client,\ please\ create\ cas\ client\ before\ sso = +failed\ to\ find\ account\ for\ CAS\ user[name\=%s] = +multiple\ accounts\ found\ for\ CAS\ user[name\=%s] = +oAuth2Client[uuid\:%s,\ name\:%s]\ has\ been\ deleted = +redirectUrl\ is\ error,\ %s = +unable\ to\ find\ OAuth2\ client[uuid\=%s] = +duplicate\ oauth2\ server[authorizationUrl\=%s] = +multiple\ accounts\ found\ for\ OAuth2\ user[sub\=%s] = +failed\ to\ find\ account\ for\ OAuth2\ user[name\=%s],\ maybe\ logging\ in\ for\ the\ first\ time = +multiple\ accounts\ found\ for\ OAuth2\ user[AccountVO.username\=%s] = +there\ was\ an\ error,\ reason\:\ \ token\ response\ is\ null = +there\ was\ an\ error,\ reason\:\ \ %s\ is\ null = +response\ has\ error\ \:\ %s = +failed\ to\ send\ response\ to\ oauth\ server = +get\ code\ response\ has\ error\ \:\ %s = +error\ requesting\ token\ in\ clientUuid[%s],\ reason\:\ %s = +failed\ to\ post\ %s\ with\ unexpected\ status\ code\ %s = +failed\ to\ post\ %s\ with\ IO\ error = +unable\ to\ find\ oAuth2Token[userUuid\=%s] = + +# In Module: storage-device +scsi\ lun[uuid\:\ %s]\ and\ [uuid\:\ %s]\ does\ not\ has\ a\ common\ host = +scsi\ lun[uuid\:\ %s]\ is\ in\ disabled\ state = +the\ specific\ SCSI\ lun\ required = +do\ not\ support\ migration\ of\ vm[uuid\:%s]\ with\ shared\ block = +NVMe\ server[ip\:\ %s,\ port\:\ %s,\ transport\:\ %s]\ already\ exists = +NVMe\ server\ ip\:\ %s\ is\ not\ valid = +NVMe\ server[uuid\:\ %s]\ already\ attached\ to\ cluster[uuid\:\ %s] = 
+iSCSI\ server[ip\:\ %s,\ port\:\ %s]\ already\ exists = +iSCSI\ server\ ip\:\ %s\ is\ not\ valid = +iSCSI\ server[uuid\:\ %s]\ already\ attached\ to\ cluster[uuid\:\ %s] = +iSCSI\ server[uuid\:\ %s]\ not\ attached\ to\ cluster[uuid\:\ %s] = +iSCSI\ server[uuid\:\ %s]\ still\ attached\ to\ cluster[uuid\:\ %s] = +scsi\ lun[wwid\:\ %s]\ has\ been\ attached\ to\ vm\ instance\ %s = +scisLun[uuids\:%s]\ are\ not\ attach\ to\ the\ cluster\ of\ host[uuid\:%s] = +please\ umount\ all\ block\ devices\ of\ the\ vm[%s]\ and\ try\ again = +hba\ scan\ is\ error\:\ %s = +scsi\ lun[wwid\:%s]\ has\ been\ attached\ into\ the\ vm[%s] = +vm\ instance[%s]\ state[%s]\ not\ in\ allowed\ state[%s]\ for\ operation = +vm\ instance[%s]\ host[uuid\:\ %s]\ not\ attached\ scsi\ lun[uuid\:\ %s] = +different\ nvme\ targets\ were\ found\ on\ host[%s]\ and\ host[%s] = +SCSI\ LUN[%s]\ is\ attached\ to\ VM\ [%s] = +SCSI\ LUN[%s]\ record\ not\ found\ on\ host\ [%s] = +unexpected\ hypervisor\ type[%s]\ for\ host\ [%s] = +different\ iscsi\ configuration\ were\ found\ on\ host[uuid\:%s,\ targets\:%s]and\ host[uuid\:%s,\ targets\:%s] = 被在主机 [uuid:%s, targets:%s] 和主机 [uuid:%s, targets:%s] 中发现了不同的 iscsi 配置 +different\ disk\ types\ are\ found\ in\ different\ hosts\ for\ lun[serial\:%s],\ unable\ to\ attach\ it\ to\ cluster = 不同的主机中存在不同类型的 lun[serial:%s],无法将其挂载到集群中 +specified\ scsi\ lun[wwid\:\ %s]\ not\ exists\ or\ disabled = +scsi[%s]\ lun[wwid\:%s]\ has\ been\ attached\ into\ the\ vm[%s] = +vm\ instance[%s]\ state\ [%s]\ not\ in\ allowed\ state[%s]\ for\ operation = +vm\ instance[uuid\:\ %s]\ host[uuid\:\ %s]\ not\ attached\ scsi\ lun[uuid\:\ %s] = + +# In Module: storage-ha-plugin +not\ found\ hostId\ for\ hostUuid[%s]\ and\ primaryStorageUuid[%s] = +host\ %s's\ heartbeat\ is\ not\ updated = +host[uuid\:%s]'s\ heartbeat\ is\ not\ updated = +shareblock\ says\ host\ %s\ is\ offline\ on\ %s = + # In Module: storage null\ installPath\ returned\ from\ driver\:\ %s = %s\:\ health\ state\:\ %s = @@ -3262,6 
+3883,8 @@ no\ backup\ storage\ type\ specified\ support\ to\ primary\ storage[uuid\:%s] = root\ image\ and\ root\ image\ cache\ has\ been\ deleted,\ cannot\ reimage\ now = storage\ is\ not\ healthy\:%s = No\ primary\ storage\ plugin\ registered\ with\ identity\:\ %s = +not\ support\ protocol[%s]\ on\ type[%s]\ primary\ storage = +not\ support\ take\ volumes\ snapshots\ on\ multiple\ ps\ when\ including\ storage\ snapshot = cannot\ find\ ExternalPrimaryStorage[uuid\:%s] = cannot\ connect\ any\ external\ storage = %s\ should\ not\ be\ null = {0} 不能为空 @@ -3271,6 +3894,8 @@ backup\ storage[uuid\:%s]\ has\ been\ attached\ to\ zone[uuid\:%s] = 镜像服 failed\ to\ get\ header\ of\ image\ url\ %s\:\ %s = 获取链接 {0} 的Header信息失败,原因:{1} failed\ to\ get\ header\ of\ image\ url\ %s = 获取链接 {0} 的Header信息失败 the\ backup\ storage[uuid\:%s,\ name\:%s]\ has\ not\ enough\ capacity\ to\ download\ the\ image[%s].\ Required\ size\:%s,\ available\ size\:%s = +the\ image\ size\ get\ from\ url\ %s\ is\ %d\ bytes,\ it's\ too\ small\ for\ an\ image,\ please\ check\ the\ url\ again. = +the\ backup\ storage[uuid\:%s,\ name\:%s]\ has\ not\ enough\ capacity\ to\ download\ the\ image[%s].Required\ size\:%s,\ available\ size\:%s = backup\ storage\ cannot\ proceed\ message[%s]\ because\ its\ status\ is\ %s = 镜像服务器无法处理消息[{0}]因为它的状态为{1} backup\ storage\ cannot\ proceed\ message[%s]\ because\ its\ state\ is\ %s = 镜像服务器无法处理消息[{0}]因为它的状态为{1} cannot\ reserve\ %s\ on\ the\ backup\ storage[uuid\:%s],\ it\ only\ has\ %s\ available = 无法在镜像服务器{1}保留{0},它仅有{2}可用容量 @@ -3279,6 +3904,7 @@ only\ one\ backup\ storage\ data\ network\ system\ tag\ is\ allowed,\ but\ %s\ g required\ primary\ storage[uuid\:%s,\ type\:%s]\ could\ not\ support\ any\ backup\ storage. 
= 所需的主存储[uuid:{0},类型:{1}]无法支持任何备份存储。 after\ subtracting\ reserved\ capacity,\ no\ backup\ storage\ has\ required\ capacity[%s\ bytes] = 减去保留容量后,没有备份存储具有所需容量[{0}字节] unable\ to\ allocate\ a\ backup\ storage = 无法分配镜像存储 +outputProtocol[%s]\ is\ exist\ on\ primary\ storage[%s]no\ need\ to\ add\ again = unknown\ primary\ storage\ type[%s] = zoneUuids,\ clusterUuids,\ primaryStorageUuids\ must\ have\ at\ least\ one\ be\ none-empty\ list,\ or\ all\ is\ set\ to\ true = zoneUuids、clusterUuids、primaryStorageUuids中必须至少有一个不为空列表,除非将字段 all 设为 true primary\ storage[uuid\:%s]\ has\ not\ been\ attached\ to\ cluster[uuid\:%s]\ yet = 主存储[uuid:{0}]还未加载到集群[uuid:{1}]上 @@ -3286,11 +3912,17 @@ primary\ storage[uuid\:%s]\ has\ been\ attached\ to\ cluster[uuid\:%s] = 主存 primary\ storage[uuid\:%s]\ and\ cluster[uuid\:%s]\ are\ not\ in\ the\ same\ zone = 主存储[uuid:{0}]和集群[uuid:{1}]不在同一个数据中心内 url[%s]\ has\ been\ occupied,\ it\ cannot\ be\ duplicate\ in\ same\ cluster = url[{0}]已经被占用,在相同的集群里它不能再次使用 'resourceUuid'\ and\ 'resourceType'\ must\ be\ set\ both\ or\ neither! = “ resourceUuid ”和“ resourceType ”必须同时设置或都不设置! +primary\ storage(s)\ [uuid\:\ %s]\ where\ volume(s)\ locate\ is\ not\ Enabled\ or\ Connected = +after\ removing\ primary\ storage%s\ to\ avoid,\ there\ is\ no\ candidate\ primary\ storage\ anymore.\ please\ check\ primary\ storage\ status\ and\ state\ in\ the\ cluster. = primary\ storage[uuid\:%s]\ is\ not\ Connected = backup\ storage[uuid\:%s]\ is\ not\ attached\ to\ zone[uuid\:%s]\ the\ primary\ storage[uuid\:%s]\ belongs\ to = 镜像服务器[uuid:{0}]没有加载到主存储[uuid:{2}]所在的数据中心[uuid:{1}] volume[uuid\:%s]\ has\ been\ attached\ a\ %s\ VM.\ VM\ should\ be\ Stopped. = 卷[uuid:{0}]已连接到{1}虚拟机。应停止虚拟机。 +primary\ storage[uuid\:%s]\ cannot\ be\ deleted\ for\ still\ being\ attached\ to\ cluster[uuid\:%s]. 
= cannot\ attach\ volume[uuid\:%s]\ whose\ primary\ storage\ is\ Maintenance = 无法挂载硬盘[uuid:{0}],其主存储处于维护模式 +cannot\ reserve\ %s\ bytes\ on\ the\ primary\ storage[uuid\:%s],\ it's\ short\ of\ available\ capacity = +the\ primary\ storage[uuid\:%s]\ is\ not\ in\ status\ of\ Connected,\ current\ status\ is\ %s = PrimaryStorageFeatureAllocatorFlow[%s]\ returns\ zero\ primary\ storage\ candidate = +cannot\ find\ primary\ storage\ satisfying\ conditions[connected\ to\ host\:%s,\ state\:%s,\ status\:\ %s,\ available\ capacity\ >\ %s = %s\ is\ invalid.\ %s\ is\ not\ a\ valid\ zstack\ uuid = {0}是无效的,{1}不是一个有效的ZStack uuid no\ primary\ storage[uuid\:%s]\ found = 找不到主存储[uuid:{0}] primaryStorage[uuid\=%s]\ does\ not\ exist = PrimaryStorage[uuid={0}]不存在 @@ -3307,6 +3939,9 @@ cannot\ find\ primary\ storage[uuid\:%s],\ the\ uuid\ is\ specified\ in\ instanc cannot\ find\ primary\ storage\ having\ user\ tag[%s].\ The\ user\ tag\ is\ specified\ in\ instance\ offering\ or\ disk\ offering = 找不到带有指定用户标签的主存储[uuid:{0}],该标签由计算规格或者硬盘规格指定 PrimaryStorageTagAllocatorExtensionPoint[%s]\ returns\ zero\ primary\ storage\ candidate = 主存储标签分配插件[{0}]找不到可用的主存储 failed\ to\ cancel\ deletion\ job.\ Volume[uuid\:%s]\ not\ exists. = 无法取消删除作业。卷[uuid:{0}]不存在。 +failed\ to\ cancel\ deletion\ job.\ Volume[uuid\:%s]\ not\ attached\ to\ any\ vm,\ offline\ snapshot\ deletion\ do\ not\ support\ cancel. = +failed\ to\ cancel\ deletion\ job.\ Volume[uuid\:%s]\ attached\ vm\ not\ exists,\ offline\ snapshot\ deletion\ do\ not\ support\ cancel. = +failed\ to\ cancel\ deletion\ job.\ Volume[uuid\:%s]\ attached\ vm\ not\ in\ state\ %s\ offline\ snapshot\ deletion\ do\ not\ support\ cancel. 
= volume\ snapshot[uuids\:%s]\ is\ in\ state\ Disabled,\ cannot\ revert\ volume\ to\ it = 卷快照[uuid:{0}]处于禁用状态,无法将卷恢复为该状态 Can\ not\ take\ memory\ snapshot,\ expected\ vm\ states\ are\ [%s,\ %s] = 无法获取内存快照,预期的VM状态为[{0},{1}] volume\ snapshot[uuid\:%s]\ is\ in\ state\ %s,\ cannot\ revert\ volume\ to\ it = 硬盘快照[uuid:{0}]出于状态{1},不能恢复硬盘到该快照状态 @@ -3316,20 +3951,27 @@ can\ not\ find\ volume\ uuid\ for\ snapshosts[uuid\:\ %s] = 找不到快照主 Unsupported\ maximum\ snapshot\ number\ (%d)\ for\ volume\ [uuid\:%s] = 不支持卷[uuid:{1}]的最大快照数({0}) cannot\ find\ type\ for\ primaryStorage\ [%s] = 找不到PrimaryStorage[{0}]的类型 cannot\ ask\ primary\ storage[uuid\:%s]\ for\ volume\ snapshot\ capability = 无法向数据存储 {0} 请求卷快照功能 +primary\ storage[uuid\:%s]\ doesn't\ support\ volume\ snapshot;\ cannot\ create\ snapshot\ for\ volume[uuid\:%s] = cannot\ find\ snapshot\:\ %s = 找不到快照:{0} this\ resource\ type\ %s\ does\ not\ support\ querying\ memory\ snapshot\ references = 此资源类型{0}不支持查询内存快照引用 cannot\ find\ VmInstanceResourceMetadataGroupVO\ of\ the\ memory\ snapshot\ group[uuid\:%s] = snapshot[uuid\:%s,\ name\:%s]'s\ status[%s]\ is\ not\ allowed\ for\ message[%s],\ allowed\ status%s = cannot\ find\ volume\ snapshot[uuid\:%s,\ name\:%s],\ it\ may\ have\ been\ deleted\ by\ previous\ operation = 不能创建硬盘快照[uuid:{0}, name:{1}],该快照可能已经被以前的操作删除 snapshot\ or\ its\ desendant\ has\ reference\ volume[uuids\:%s] = 快照或其目标具有引用卷[uuid:{0}] +vm[uuid\:%s]\ is\ not\ Running,\ Paused\ or\ Destroyed,\ Stopped,\ Destroying,\ current\ state[%s] = failed\ to\ change\ status\ of\ volume\ snapshot[uuid\:%s,\ name\:%s]\ by\ status\ event[%s] = 通过状态事件[{2}]改变硬盘快照[uuid:{0}, name:{1}]失败 +unable\ to\ reset\ volume[uuid\:%s]\ to\ snapshot[uuid\:%s],\ the\ vm[uuid\:%s]\ volume\ attached\ to\ is\ not\ in\ Stopped\ state,\ current\ state\ is\ %s = snapshot(s)\ %s\ in\ the\ group\ has\ been\ deleted,\ can\ only\ revert\ one\ by\ one. 
= 快照组里的快照{0}已经被删除了,仅能单盘恢复。 +volume(s)\ %s\ is\ no\ longer\ attached,\ can\ only\ revert\ one\ by\ one.\ If\ you\ need\ to\ group\ revert,\ please\ re-attach\ it. = +new\ volume(s)\ %s\ attached\ after\ snapshot\ point,\ can\ only\ revert\ one\ by\ one.\ If\ you\ need\ to\ group\ revert,\ please\ detach\ it. = +\ volume[uuid\:\ %s]\ has\ been\ referenced\ by\ other\ volumes\ [%s],\ can\ not\ change\ install\ path\ before\ flatten\ them\ and\ their\ descendants\ = current\ volume\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s] = failed\ to\ select\ backup\ storage\ to\ download\ iso[uuid\=%s] = unable\ to\ download\ iso\ to\ primary\ storage = -volume[uuid\:%s]\ is\ not\ in\ status\ Ready,\ current\ is\ %s,\ can't\ create\ snapshot = 硬盘[uuid:{0}]未出于就绪状态,当前是{1},不能创建快照 volume[uuid\:%s,\ type\:%s],\ can't\ create\ snapshot = 卷[uuid:{0},类型:{1}],无法创建快照 +volume[uuid\:%s]\ is\ not\ in\ state\ Enabled,\ current\ is\ %s,\ can't\ create\ snapshot = Can\ not\ take\ memory\ snapshot,\ vm\ current\ state[%s],\ but\ expect\ state\ are\ [%s,\ %s] = 无法获取内存快照,VM当前状态为[{0}],但预期状态为[{1},{2}] +volume[uuid\:%s]\ is\ not\ in\ status\ Ready,\ current\ is\ %s,\ can't\ create\ snapshot = 硬盘[uuid:{0}]未出于就绪状态,当前是{1},不能创建快照 the\ volume[uuid\:%s]\ is\ not\ in\ status\ of\ deleted.\ This\ is\ operation\ is\ to\ recover\ a\ deleted\ data\ volume = 硬盘[uuid:{0}]未处于删除状态。此操作将覆盖一个被删除的硬盘 image[uuid\:%s]\ is\ not\ %s,\ it's\ %s = 镜像[uuid:{0}]不是{1},而是{2} image[uuid\:%s]\ is\ not\ Enabled,\ it's\ %s = 镜像不能启用,此镜像是{1} @@ -3347,17 +3989,21 @@ the\ volume[uuid\:%s]\ is\ in\ status\ of\ deleted,\ cannot\ do\ the\ operation data\ volume[uuid\:%s]\ has\ been\ attached\ to\ some\ vm,\ can't\ attach\ again = 硬盘[uuid:{0}]已经被加载上虚拟机了,不能再次加载 data\ volume\ can\ only\ be\ attached\ when\ status\ is\ [%s,\ %s],\ current\ is\ %s = 硬盘仅能当处于[{0}, {1}]状态挂载,当前状态是{2} data\ volume[uuid\:%s]\ of\ format[%s]\ is\ not\ supported\ for\ attach\ to\ any\ hypervisor. 
= +data\ volume[uuid\:%s]\ has\ format[%s]\ that\ can\ only\ be\ attached\ to\ hypervisor[%s],\ but\ vm\ has\ hypervisor\ type[%s].\ Can't\ attach = Can\ not\ attach\ volume\ to\ vm\ runs\ on\ host[uuid\:\ %s]\ which\ is\ disconnected\ with\ volume's\ storage[uuid\:\ %s] = it's\ not\ allowed\ to\ backup\ root\ volume,\ uuid\:%s = 备份硬盘不被允许,uuid:{0} unexpected\ disk\ size\ settings = 意外的磁盘大小设置 volume[uuid\:%s,\ type\:%s]\ can't\ be\ deleted = 无法删除卷[uuid:{0},类型:{1}] volume[uuid\:%s]\ is\ already\ in\ status\ of\ deleted = 硬盘[uuid:{0}]早已处于被删除状态 +can\ not\ delete\ volume[%s],\ because\ volume\ attach\ to\ host[%s] = it's\ not\ allowed\ to\ change\ state\ of\ root\ volume,\ uuid\:%s = 不能改变硬盘状态,uuid:{0} +can\ not\ change\ volume[%s]\ state,\ because\ volume\ attach\ to\ host[%s] = can\ not\ attach\ volume[%s]\ to\ host[%s],\ because\ host[status\:%s]\ is\ not\ connected = 无法将硬盘[{0}]挂载到主机[{1}],因为主机[status:{2}]未连接 mount\ path\ must\ be\ absolute\ path = 装载路径必须是绝对路径 can\ not\ attach\ volume[%s]\ to\ host[%s],\ because\ volume\ is\ attaching\ to\ host[%s] = 无法将硬盘[{0}]挂载到主机[{1}],因为硬盘正在挂载到主机[{2}] can\ not\ attach\ volume[%s]\ to\ host[%s],\ because\ the\ volume[%s]\ occupies\ the\ mount\ path[%s]\ on\ host[%s] = 无法将硬盘[{0}]挂载到主机[{1}],因为硬盘[{2}]在主机[{4}]上占用了挂载路径[{3}] can\ not\ attach\ volume[%s]\ to\ host[%s],\ because\ the\ another\ volume\ occupies\ the\ mount\ path[%s] = 无法将硬盘[{0}]挂载到主机[{1}],因为另一个硬盘占用了挂载路径[{2}] +can\ not\ detach\ volume[%s]\ from\ host.\ it\ may\ have\ been\ detached = cannot\ flatten\ a\ shareable\ volume[uuid\:%s] = 无法平整可共享的卷[uuid:{0}] can\ not\ found\ in\ used\ snapshot\ tree\ of\ volume[uuid\:\ %s] = 在卷[uuid:{0}]的已使用快照树中找不到 cannot\ undo\ not\ latest\ snapshot = 无法撤消不是最新的快照 @@ -3435,10 +4081,13 @@ cannot\ update\ simple\ tag\ pattern\ format = 无法更新简单标记模式格 simple\ tag\ pattern\ has\ no\ tokens = 简单标记模式没有标记 illegal\ tag\ uuids\ %s,\ tag\ type\ must\ be\ simple, = 标记uuid{0}非法,标记类型必须简单。 Invalid\ color\ specification[%s],\ must\ like\ #FF00FF = 
颜色规范[{0}]无效,必须类似于#FF00FF +Get\ format[%s],\ format\ must\ like\ that\ name\:\:{tokenName1}\:\:{tokenName2}\ ...\ \:\:{tokenNameN}\ or\ {tokenName1}\:\:{tokenName2}\ ...\ \:\:{tokenNameN}\ Name\ cannot\ contain\ '{}\:' = 获取格式[{0}],格式必须类似于名称::'{tokenName1}'::'{tokenName2}' ... ::'{tokenNameN}' 或 '{tokenName1}'::'{tokenName2}' ... ::'{tokenNameN}' 名称不能包含 '''{}':'' all\ tokens\ %s\ must\ be\ specify = 必须指定所有令牌{0} you\ already\ has\ a\ tag\ which\ [name\:%s,\ color\:%s] = resource[uuid\:%s]\ has\ been\ attached\ %d\ tags,\ cannot\ attach\ any\ more = +# In Module: test-premium + # In Module: test I\ should\ not\ be\ in\ error\ list\ %d = I\ should\ not\ be\ in\ error\ list\ either\ %d = @@ -3447,6 +4096,14 @@ done,\ on\ purpose = I\ should\ not\ be\ errs\ list = I\ should\ not\ be\ errs\ list\ either. = +# In Module: testlib-premium +InfoSecEncryptDriver\ encrypt\ failed = InfoSecEncryptDriver 加密失败 +InfoSecEncryptDriver\ decrypt\ failed = InfoSecEncryptDriver 解密失败 +illegal\ argument\ %s = +failed\ to\ decrypt\ data = +fail\ to\ decrypt\ cipher\ text = +failed\ to\ parse\ MS\ envelope\:\ %s,\ %s = + # In Module: testlib # In Module: ticket @@ -3454,6 +4111,7 @@ ticket[uuid\:%s,\ name\:%s]\ can\ only\ be\ updated\ after\ being\ cancelled,\ c operation\ denied.\ the\ operator\ needs\ to\ be\ done\ by\ account/virtual\ ID[uuid\:%s] = 操作无效,需要account/virtual ID[uuid:{0}]才能完成操作 no\ accountSystemType[%s]\ defined\ in\ system = 未定义的accountSystemType[{0}]类型 not\ matched\ ticket\ type\ found = 未找到匹配的票证类型 +no\ matched\ ticket\ flow\ collection\ or\ no\ default\ ticket\ flow\ collection\ found,\ you\ must\ specify\ the\ flowCollectionUuid\ or\ create\ a\ default\ ticket\ flow\ collection\ in\ system = Ticket\ flow\ collection[uuid\:%s]\ not\ matches\ ticket\ type[uuid\:%s] = 票证流集合[uuid:{0}]与票证类型[uuid:{1}]不匹配 Ticket\ flow\ collection[uuid\:%s]\ is\ invalid,\ contact\ admin\ to\ correct\ it = 当前工单流程[uuid:{0}]失效,请联系admin修复 Ticket\ flow\ collection[uuid\:%s]\ is\ disable,\ can\ not\ be\ 
used = 工单流程[uuid:{0}]处于禁用状态,无法被使用 @@ -3485,6 +4143,8 @@ two\ factor\ authentication\ failed\ because\ there\ is\ no\ token\ in\ msg\ sys two\ factor\ authentication\ failed\ because\ there\ is\ no\ secret\ for\ %s\:%s = 双因素身份验证失败,因为{0}没有密码:{1} failed\ to\ verify\ two\ factor\ authentication\ code = 验证双因素身份验证代码失败 +# In Module: upgrade-hack + # In Module: utils # In Module: vhost @@ -3506,6 +4166,7 @@ service\ provider\ of\ the\ vip[uuid\:%s,\ name\:%s,\ ip\:\ %s]\ has\ been\ set\ cannot\ find\ the\ vip[uuid\:%s],\ it\ may\ have\ been\ deleted = # In Module: virtualRouterProvider +the\ virtual\ router[name\:%s,\ uuid\:%s,\ current\ state\:%s]\ is\ not\ running,and\ cannot\ perform\ required\ operation.\ Please\ retry\ your\ operation\ later\ once\ it\ is\ running = virtual\ router[uuid\:%s]\ is\ in\ status\ of\ %s\ that\ cannot\ make\ http\ call\ to\ %s = 云路由[uuid:{0}]处于状态{1}中,无法向{2}发送http调用 virtual\ router[uuid\:%s]\ has\ no\ management\ nic\ that\ cannot\ make\ http\ call\ to\ %s = 虚拟路由器[uuid:{0}]没有无法对{1}进行HTTP调用的管理NIC unable\ to\ add\ nic[ip\:%s,\ ip6\:%s,\ mac\:%s]\ to\ virtual\ router\ vm[uuid\:%s\ ip\:%s],\ because\ %s = @@ -3534,6 +4195,9 @@ No\ virtual\ router\ instance\ offering\ with\ uuid\:%s\ is\ found = 找不到uu the\ network\ of\ virtual\ router\ instance\ offering\ with\ uuid\:%s\ can't\ be\ same\ with\ private\ l3\ network\ uuid\:%s = uuid为{0}的虚拟路由器实例提供的网络不能与uuid为{1}的专用三层网络相同 unable\ to\ find\ a\ virtual\ router\ offering\ for\ l3Network[uuid\:%s]\ in\ zone[uuid\:%s],\ please\ at\ least\ create\ a\ default\ virtual\ router\ offering\ in\ that\ zone = Failed\ to\ start\ vr\ l3[uuid\:\ %s] = +cannot\ add\ ip\ range,\ because\ l3\ network[uuid\:%s]\ is\ management\ network\ of\ virtual\ router\ offering = +cannot\ add\ ip\ range,\ because\ l3\ network[uuid\:%s]\ is\ management\ network\ of\ virtual\ router = +couldn't\ add\ image,\ because\ systemTag\ [%s]\ includes\ invalid\ appliance\ image\ type\ [%s] = failed\ tot\ attach\ virtual\ router\ network\ 
services\ to\ l3Network[uuid\:%s].\ When\ eip\ is\ selected,\ snat\ must\ be\ selected\ too = 挂载虚拟路由网络服务到三层网络[uuid:{0}]失败。选中EIP服务时,SNAT服务也必须被选中 failed\ tot\ attach\ virtual\ router\ network\ services\ to\ l3Network[uuid\:%s].\ When\ port\ forwarding\ is\ selected,\ snat\ must\ be\ selected\ too = 挂载虚拟路由网络服务到三层网络[uuid:{0}]失败。选中端口转发服务时,SNAT服务也必须被选中 update\ virtual\ router\ [uuid\:%s]\ default\ network\ failed,\ because\ %s = 更新虚拟路由器[uuid:{0}]默认网络失败,原因是{1} @@ -3544,21 +4208,29 @@ unable\ to\ program\ dhcp\ entries\ served\ by\ virtual\ router[uuid\:%s,\ ip\:% virtual\ router[uuid\:%s,\ ip\:%s]\ failed\ to\ configure\ dns%s\ for\ L3Network[uuid\:%s,\ name\:%s],\ %s = 云路由[uuid:{0}, ip:{1}]未能为三层网络[uuid:{3}, name:{4}]配置DNS{2},错误细节: {5} virtual\ router[name\:\ %s,\ uuid\:\ %s]\ failed\ to\ configure\ dns%s,\ %s\ = 云路由[name: {0}, uuid: {1}]未能配置DNS{2},错误细节: {3} failed\ to\ create\ eip[uuid\:%s,\ name\:%s,\ ip\:%s]\ for\ vm\ nic[uuid\:%s]\ on\ virtual\ router[uuid\:%s],\ %s = 无法为虚拟机网卡[uuid:{3}]在云路由[uuid:{4}]上创建EIP[uuid:{0}, name:{1}, ip:{2}],错误细节: {5} +found\ a\ virtual\ router\ offering[uuid\:%s]\ for\ L3Network[uuid\:%s]\ in\ zone[uuid\:%s];\ however,\ the\ network's\ public\ network[uuid\:%s]\ is\ not\ the\ same\ to\ EIP[uuid\:%s]'s;\ you\ may\ need\ to\ use\ system\ tag\ guestL3Network\:\:l3NetworkUuid\ to\ specify\ a\ particular\ virtual\ router\ offering\ for\ the\ L3Network = failed\ to\ remove\ eip[uuid\:%s,\ name\:%s,\ ip\:%s]\ for\ vm\ nic[uuid\:%s]\ on\ virtual\ router[uuid\:%s],\ %s = 未能在云路由[uuid:{4}]上为虚拟机网卡[uuid:{3}]移除EIP[uuid:{0}, name:{1}, ip:{2}],错误细节: {5} failed\ to\ sync\ eip\ on\ virtual\ router[uuid\:%s],\ %s = 未能在云路由[uuid:{0}]上同步EIP,错误细节: {1} ha\ group\ extension\ point\ nil = HA组扩展点Nil +new\ add\ vm\ nics[uuids\:%s]\ and\ attached\ vmnics\ are\ not\ on\ the\ same\ vrouter,\ they\ are\ on\ vrouters[uuids\:%s] = +new\ add\ vm\ nics[uuids\:%s]\ and\ peer\ l3s[uuids\:%s]\ of\ loadbalancer[uuid\:\ %s]'s\ vip\ are\ not\ on\ the\ same\ vrouter,\ they\ are\ on\ 
vrouters[uuids\:%s] = vmnic\ must\ be\ specified\ for\ share\ loadbalancer = 必须为Share LoadBalancer指定vmnic cannot\ find\ virtual\ router\ for\ load\ balancer\ [uuid\:%s] = 未能为负载均衡器[uuid:{0}]找到云路由 guest\ l3Network[uuid\:%s,\ name\:%s]\ needs\ SNAT\ service\ provided\ by\ virtual\ router,\ but\ public\ l3Network[uuid\:%s]\ of\ virtual\ router\ offering[uuid\:\ %s,\ name\:%s]\ is\ the\ same\ to\ this\ guest\ l3Network = 用户三层网络[uuid:{0}, name:{1}]需要云路由提供的SNAT服务,但是云路由规格[uuid: {3}, name:{4}]的公共三层网络[uuid:{2}]与该客户三层网络相同 virtual\ router[name\:\ %s,\ uuid\:\ %s]\ failed\ to\ sync\ snat%s,\ %s = 云路由[name: {0}, uuid: {1}]未能同步SNAT{2},错误细节: {3} failed\ to\ create\ port\ forwarding\ rule[vip\ ip\:\ %s,\ private\ ip\:\ %s,\ vip\ start\ port\:\ %s,\ vip\ end\ port\:\ %s,\ private\ start\ port\:\ %s,\ private\ end\ port\:\ %s],\ because\ %s = 无法创建端口转发规则[vip ip: {0}, private ip: {1}, vip start port: {2}, vip end port: {3}, private start port: {4}, private end port: {5}],错误细节: {6} failed\ to\ revoke\ port\ forwarding\ rules\ %s,\ because\ %s = 未能解除端口转发规则{0},原因: {1} +found\ a\ virtual\ router\ offering[uuid\:%s]\ for\ L3Network[uuid\:%s]\ in\ zone[uuid\:%s];\ however,\ the\ network's\ public\ network[uuid\:%s]\ is\ not\ the\ same\ to\ PortForwarding\ rule[uuid\:%s]'s;\ you\ may\ need\ to\ use\ system\ tag\ guestL3Network\:\:l3NetworkUuid\ to\ specify\ a\ particular\ virtual\ router\ offering\ for\ the\ L3Network = +virtual\ router\ doesn't\ support\ port\ forwarding\ range\ redirection,\ the\ vipPortStart\ must\ be\ equals\ to\ privatePortStart\ and\ vipPortEnd\ must\ be\ equals\ to\ privatePortEnd;but\ this\ rule\ rule\ has\ a\ mismatching\ range\:\ vip\ port[%s,\ %s],\ private\ port[%s,\ %s] = failed\ to\ add\ portforwardings\ on\ virtual\ router[uuid\:%s],\ %s = 在云路由[uuid:{0}]添加端口转发失败,{1} failed\ to\ revoke\ port\ forwardings\ on\ virtual\ router[uuid\:%s],\ %s = 取消在云路由[uuid:{0}]上端口转发服务失败,{1} failed\ to\ sync\ port\ forwarding\ rules\ served\ by\ virtual\ router[name\:\ %s,\ uuid\:\ 
%s],\ because\ %s = 未能同步由云路由[name: {0}, uuid: {1}]提供的端口转发规则,因为: {2} +failed\ to\ sync\ vips[ips\:\ %s]\ on\ virtual\ router[uuid\:%s]\ for\ attaching\ nic[uuid\:\ %s,\ ip\:\ %s],\ because\ %s = failed\ to\ remove\ vip%s,\ because\ %s = 未能移除VIP{0},因为{1} virtual\ router[uuid\:%s,\ state\:%s]\ is\ not\ running = 云路由[uuid:{0}, state:{1}]没有运行 +found\ a\ virtual\ router\ offering[uuid\:%s]\ for\ L3Network[uuid\:%s]\ in\ zone[uuid\:%s];\ however,\ the\ network's\ public\ network[uuid\:%s]\ is\ not\ the\ same\ to\ VIP[uuid\:%s]'s;\ you\ may\ need\ to\ use\ system\ tag\ guestL3Network\:\:l3NetworkUuid\ to\ specify\ a\ particular\ virtual\ router\ offering\ for\ the\ L3Network = failed\ to\ change\ nic[ip\:%s,\ mac\:%s]\ firewall\ default\ action\ of\ virtual\ router\ vm[uuid\:%s],\ because\ %s = 修改云路由[uuid:{2}]的网卡[ip:{0}, mac:{1}]的默认防火墙规则失败,因为{3} +the\ SSH\ port\ is\ not\ open\ after\ %s\ seconds.\ Failed\ to\ login\ the\ virtual\ router[ip\:%s] = unable\ to\ ssh\ in\ to\ the\ virtual\ router[%s]\ after\ configure\ ssh = 配置SSH后,无法通过SSH连接到虚拟路由器[{0}] vyos\ init\ command\ failed,\ because\:%s = vyos init命令失败,原因是:{0} virtual\ router\ deploy\ agent\ failed,\ because\ %s = @@ -3570,29 +4242,197 @@ failed\ to\ update\ bridge[%s]\ for\ l2Network[uuid\:%s,\ name\:%s]\ on\ kvm\ ho bonding[%s]\ is\ not\ found\ on\ host[uuid\:%s]\ for\ virtual\ switch[uuid\:%s] = failed\ to\ update\ vlan\ bridge\ for\ virtual\ switch[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = The\ uplink\ bonding[%s]\ is\ not\ found\ on\ host[uuid\:%s]\ for\ virtual\ switch[uuid\:%s] = +an\ unexpected\ error\ caused\ the\ bonding\ to\ not\ be\ created\ on\ host[uuid\:%s]\ for\ virtual\ switch[uuid\:%s] = interface[uuid\:%s]\ is\ not\ found\ on\ host[uuid\:%s]\ for\ virtual\ switch[uuid\:%s] = +the\ default\ virtual\ switch\ network[uuid\:%s]\ cannot\ be\ deleted\ when\ it\ is\ still\ attached\ to\ hosts = +could\ not\ delete\ virtual\ switch\ network[uuid\:%s],because\ host\ kernel\ interface[uuid\:%s]\ 
still\ exists\ on\ the\ virtual\ switch\ and\ its\ host\ status\ is\ not\ connected = could\ not\ delete\ l2\ network[uuid\:%s]\ with\ default\ port\ group = +could\ not\ delete\ l2\ port\ group\ network[uuid\:%s],because\ host\ kernel\ interface[%s]\ still\ exists\ on\ the\ port\ group\ and\ its\ host\ status\ is\ not\ connected = +cannot\ delete\ default\ port\ group[uuid\:%s],\ because\ there\ are\ host\ kernel\ interfaces\ still\ exist\ on\ hosts[uuid\:%s] = +could\ not\ delete\ port\ group[uuid\:%s],\ because\ host\ kernel\ interface[uuid\:%s]\ still\ exists\ on\ the\ port\ group\ and\ its\ host\ status\ is\ not\ connected = +could\ not\ create\ host\ kernel\ interface,\ because\ requiredIp\ cannot\ be\ null\ with\ l3Network[uuid\:%s]\ disable\ IPAM = +could\ not\ batch\ create\ host\ kernel\ interface,\ because\ hostUuid\ in\ struct\ should\ be\ set = +could\ not\ create\ host\ kernel\ interface,\ because\ host[uuid\:%s]\ not\ found = +could\ not\ create\ host\ kernel\ interface\ for\ host[uuid\:%s],\ because\ name\ should\ be\ set = +could\ not\ batch\ create\ host\ kernel\ interface,\ because\ ip\ cannot\ be\ null\ with\ l3Network[uuid\:%s]\ disable\ IPAM = +could\ not\ batch\ create\ host\ kernel\ interface,\ because\ duplicate\ ipv4\ address[%s]\ in\ input\ structs = +could\ not\ batch\ create\ host\ kernel\ interface,\ because\ duplicate\ ipv6\ address[%s]\ in\ input\ structs = +could\ not\ update\ host\ kernel\ interface[uuid\:%s],\ because\ netmask\ cannot\ be\ set\ without\ requiredIp = +could\ not\ update\ host\ kernel\ interface[uuid\:%s],\ because\ host[uuid\:%s]\ is\ not\ connected = could\ not\ delete\ default\ host\ kernel\ interface[uuid\:%s] = +could\ not\ delete\ host\ kernel\ interface[uuid\:%s],\ because\ host[uuid\:%s]\ is\ not\ connected = the\ index\ of\ virtual\ switch\ in\ zone[%s]\ exceeds\ the\ maximum[%s] = need\ to\ input\ one\ system\ tag\ like\:\ [%s] = +physicalInterface\ should\ not\ be\ null\ when\ uplink\ bonding\ is\ set = 
only\ one\ systemTag\ for\ uplink\ bonding\ is\ allowed = wrong\ xmit\ hash\ policy\ in\ system\ tag[%s] = wrong\ bonding\ mode\ in\ system\ tag[%s] = wrong\ system\ tag[%s],\ should\ be\ like\:\ [%s] = +could\ not\ create\ L2PortGroupNetwork,\ because\ L2VirtualSwitchNetwork[uuid\:%s]\ already\ has\ L2PortGroupNetworks\ with\ the\ same\ vlanId[%s] = +could\ not\ attach\ L2PortGroupNetwork[uuid\:%s]\ to\ cluster[uuid\:%s],\ which\ L2VirtualSwitchNetwork\ should\ be\ used = could\ not\ attach\ L2Network\ to\ KVM\ cluster,\ because\ the\ l2Network[uuid\:%s]\ is\ default\ vSwitch = +could\ not\ attach\ L2VirtualSwitchNetwork,\ because\ interface[%s]\ in\ cluster[uuid\:%s]\ is\ already\ used\ for\ another\ L2VirtualSwitchNetwork = +could\ not\ attach\ L2PortGroupNetwork[uuid\:%s]\ to\ host[uuid\:%s],\ which\ L2VirtualSwitchNetwork\ should\ be\ used = +could\ not\ attach\ L2VirtualSwitchNetwork[uuid\:%s]\ to\ host[uuid\:%s],\ because\ the\ physical\ interface[%s]\ is\ invalid = +could\ not\ attach\ L2VirtualSwitchNetwork[uuid\:%s]\ to\ host[uuid\:%s],\ because\ the\ pass-through\ state\ of\ physical\ interface[%s]\ is\ [Enabled] = +could\ not\ attach\ L2VirtualSwitchNetwork[uuid\:%s]\ to\ host[uuid\:%s],\ because\ there\ is\ no\ uplink\ configured\ for\ the\ virtual\ switch\ on\ the\ host = +could\ not\ detach\ L2PortGroupNetwork[uuid\:%s]\ from\ cluster[uuid\:%s],\ which\ L2VirtualSwitchNetwork\ should\ be\ used = could\ not\ detach\ L2Network\ from\ KVM\ cluster,\ because\ the\ l2Network[uuid\:%s]\ is\ default\ vSwitch = +could\ not\ detach\ L2PortGroupNetwork[uuid\:%s]\ from\ host[uuid\:%s],\ which\ L2VirtualSwitchNetwork\ should\ be\ used = could\ not\ detach\ L2Network\ from\ host,\ because\ the\ l2Network[uuid\:%s]\ is\ default\ vSwitch = +could\ not\ create\ port\ group\ for\ L2Network[uuid\:%s]that\ does\ not\ belong\ to\ vSwitch[uuid\:%s] = could\ not\ create\ l3\ network\ on\ virtual\ switch[uuid\:%s] = vlan[%s]\ for\ port\ group\ is\ invalid = could\ not\ 
update\ vlan\ for\ port\ group\ with\ default\ port\ group = +could\ not\ update\ vlan\ for\ port\ group,\ because\ L2VirtualSwitchNetwork[uuid\:%s]\ already\ has\ L2PortGroupNetworks\ with\ the\ same\ vlanId[%s] = +could\ not\ update\ uplink\ bonding\ of\ default\ vSwitch\ when\ it\ is\ still\ attached\ to\ hosts\ with\ uplink\ bonding\ exist = +bondingName\ cannot\ be\ empty\ \ when\ virtual\ switch\ has\ no\ uplink\ bonding\ config = +could\ not\ update\ uplink\ bonding\ name\ because\ the\ version\ of\ the\ virtual\ switch[uuid\:%s]\ is\ old = +could\ not\ update\ uplink\ bonding\ name\ when\ virtual\ switch\ has\ uplink\ bonding\ group = +could\ not\ update\ uplink\ bonding\ name\ which\ has\ been\ occupied\ by\ another\ virtual\ switch\ attached\ to\ the\ same\ cluster = virtual\ switch[uuid\:%s]\ has\ not\ attached\ to\ host[uuid\:%s] = need\ input\ at\ least\ one\ slave = virtual\ switch[uuid\:%s]\ has\ not\ created\ uplink\ bonding\ config\ yet = +cannot\ update\ uplink\ to\ bonding,\ because\ bonding[%s]\ already\ exists\ on\ host[uuid\:%s] = +could\ not\ update\ mode\ or\ xmit_hash_policy\ of\ bonding[uuid\:%s]\ which\ is\ in\ use\ by\ virtual\ switch[uuid\:%s] = could\ not\ delete\ bonding[uuid\:%s],\ because\ it\ is\ in\ use\ by\ virtual\ switch[uuid\:%s] = failed\ to\ create\ hostKernelInterface[name\:%s]\ on\ the\ host[uuid\:%s],\ %s = failed\ to\ delete\ hostKernelInterface[uuid\:%s]\ on\ the\ host[uuid\:%s],\ %s = failed\ to\ refresh\ host\ kernel\ interface\ on\ host[uuid\:%s],\ %s = +failed\ to\ create\ default\ port\ group,\ because\ the\ bridge\ name[%s]\ of\ managementIp[%s]\ must\ be\ the\ same\ as\ the\ bridge\ name[%s]\ of\ vlanId[%s]\ on\ default\ virtual\ switch[%s] = failed\ to\ get\ the\ host\ interface\ for\ the\ managementIp[%s] = +failed\ to\ create\ default\ kernel\ interface,because\ the\ uplink\ bonding[name\:%s]\ of\ managementIp[%s]\ must\ be\ the\ same\ as\ cluster[uuid\:%s]\ default\ uplink\ bonding[name\:%s] = +failed\ to\ 
create\ default\ port\ group,\ because\ the\ vlanId[%s]\ of\ managementIp[%s]\ must\ be\ the\ same\ as\ cluster[uuid\:%s]\ default\ vlanId[%s] = + +# In Module: woodpecker + +# In Module: vpcFirewall +can\ not\ detach\ system\ default\ ruleSet = 无法分离系统默认规则集 +only\ system\ ruleSet\ can\ change\ action\ type = 只有系统规则集才能更改操作类型 +can\ not\ delete\ system\ default\ ruleSet = 无法删除系统默认规则集 +can\ not\ delete\ system\ default\ rule = 无法删除系统默认规则 +the\ router\ [uuid\:%s]\ does\ not\ has\ a\ master\ router = 路由器[uuid:{0}]没有主路由器 +the\ VPC\ Router[uuid\:%s]\ already\ has\ a\ firewall. = VPC路由器[uuid:{0}]已有防火墙。 +already\ has\ a\ rule\ template\ with\ name\ %s = 已有名为{0}的规则模板 +the\ ruleSet[%s]\ already\ has\ a\ rule\ with\ rule\ number\ %s. = 规则集[{0}]已具有规则编号为{1}的规则。 +can\ not\ update\ default\ rule[%s] = 无法更新默认规则[{0}] +only\ tcp\ protocol\ can\ use\ tcp\ flag = 只有TCP协议才能使用TCP标志 +only\ icmp\ protocol\ can\ use\ icmp\ type = 只有ICMP协议才能使用ICMP类型 +the\ rule\ [%s]\ number\ is\ invalid = 规则[{0}]编号无效 +can\ not\ attach\ the\ default\ ruleSet\ to\ other\ nic = 无法将默认规则集附加到其他NIC +ruleSet[%s]\ already\ has\ a\ l3[%s] = 规则集[{0}]已具有L3[{1}] +already\ has\ a\ rule\ with\ the\ number[%s] = 已具有编号为[{0}]的规则 +the\ ruleSet[%s]\ already\ has\ a\ rule\ with\ the\ rule\ number\ %s. 
= 规则集[{0}]已具有规则编号为{1}的规则。 +could\ not\ add\ firewall\ rule[%d]\ only\ tcp\ or\ udp\ protocol\ can\ use\ port = 无法添加防火墙规则[{0}]只有TCP或UDP协议可以使用端口 +could\ not\ add\ firewall\ rule[%d]\ only\ tcp\ protocol\ can\ use\ tcp\ flag = 无法添加防火墙规则[{0}]只有TCP协议可以使用TCP标志 +could\ not\ add\ firewall\ rule[%d]\ because\ only\ icmp\ protocol\ can\ use\ icmp\ type = 无法添加防火墙规则[{0}],因为只有ICMP协议可以使用ICMP类型 +could\ not\ add\ firewall\ rule[%d]\ because\ only\ tcp\ or\ udp\ protocol\ can\ use\ port = 无法添加防火墙规则[{0}],因为只有TCP或UDP协议可以使用端口 +could\ not\ add\ firewall\ rule[%d]\ because\ only\ tcp\ protocol\ can\ use\ tcp\ flag = 无法添加防火墙规则[{0}],因为只有TCP协议可以使用TCP标志 +could\ not\ add\ firewall\ rule[%d]\ because\ %s = 无法添加防火墙规则[{0}],因为{1} +could\ not\ add\ firewall\ rule,\ because\ ruleNo\ %d\ is\ invalid = 无法添加防火墙规则,因为RuleNo{0}无效 +could\ not\ add\ firewall\ rule,\ because\ there\ is\ no\ action\ for\ ruleNo\:%d = 无法添加防火墙规则,因为没有针对RuleNo的操作:{0} +could\ not\ add\ firewall\ rule,\ because\ source\ IP\ length\:\ %s\ is\ not\ valid\ for\ ruleNo\:%d = 无法添加防火墙规则,因为源IP长度{0}对RuleNo{1}无效 +could\ not\ add\ firewall\ rule,\ because\ destination\ IP\ length\:\ %s\ is\ not\ valid\ for\ ruleNo\:%d = 无法添加防火墙规则,因为目标IP长度{0}对RuleNo{1}无效 +could\ not\ add\ firewall\ rule,\ because\ there\ is\ no\ state\ for\ ruleNo\:%d = 无法添加防火墙规则,因为RuleNo没有状态:{0} +could\ not\ add\ firewall\ rule,\ because\ description\ length\ %s\ is\ not\ valid\ for\ ruleNo\:%d = 无法添加防火墙规则,因为描述长度{0}对RuleNo无效:{1} +the\ configuration\ file\ has\ format\ error = 配置文件有格式错误 +the\ firewall\ rules\ in\ the\ configuration\ file\ have\ syntax\ errors\:\ %s = 配置文件中的防火墙规则有语法错误:{0} +sync\ firewall\ config\ failed,because\ %s = 同步防火墙配置失败,因为{0} +update\ firewall\ ruleSet\ action\ failed,\ because\ %s = 更新防火墙规则集操作失败,因为{0} +Can\ not\ find\ l3[%]\ related\ mac\ on\ vRouter[%s] = 在VRouter[{0}]上找不到与L3[%]相关的MAC +create\ firewall\ rule[%s]\ failed,\ because\ %s = 创建防火墙规则[{0}]失败,原因是{1} +delete\ firewall\ on\ vRouter[%s],because\ %s = 删除VRouter[{0}]上的防火墙,因为{1} +create\ firewall\ 
ruleSet[%s]\ failed,\ because\ %s = 创建防火墙规则集[{0}]失败,原因是{1} +delete\ firewall\ rule\ failed\ on\ vRouter[%s],\ because\ %s = 在VRouter[{0}]上删除防火墙规则失败,因为{1} +change\ firewall\ rule\ state\ on\ vRouter[%s]\ failed,\ because\ %s = 更改VRouter[{0}]上的防火墙规则状态失败,原因是{1} +attach\ firewall\ ruleSet[%s]\ failed,\ because\ %s = 附加防火墙规则集[{0}]失败,原因是{1} +detach\ ruleSet\ failed,\ maybe\ it\ has\ been\ deleted = 分离规则集失败,它可能已被删除 +detach\ firewall\ ruleSet[%s]\ failed,because\ %s = 分离防火墙规则集[{0}]失败,原因是{1} +cannot\ find\ vpcFirewall[uuid\:%s],\ it\ may\ have\ been\ deleted = 找不到vpcFirewall[uuid:{0}],它可能已被删除 +cannot\ find\ vpcFirewallRuleSet[uuid\:%s],\ it\ may\ have\ been\ deleted = 找不到VpcFirewallRuleSet[uuid:{0}],它可能已被删除 +cannot\ find\ vpcFirewallIpSetTemplate[uuid\:%s],\ it\ may\ have\ been\ deleted = 找不到vpcFirewallIpSetTemplate[uuid:{0}],它可能已被删除 +attach\ firewall\ ruleSet[%s]\ to\ l3[%s]\ failed,because\ %s = 将防火墙规则集[{0}]附加到L3[{1}]失败,原因是{2} +detach\ firewall\ ruleSet\ from\ l3[%s]\ failed,because\ %s = 从L3[{0}]分离防火墙规则集失败,原因是{1} +find\ duplicate\ rule\ numbers\ %s\ on\ firewall[%s],l3[%s],forward[%s] = 在防火墙[{1}]、L3[{2}]、转发[{3}]上发现重复的规则编号{0} +no\ changes\ in\ ruleset\ %s = 规则集{0}中没有更改 +firewall\ %s\ related\ vpc\ not\ in\ running\ state = 防火墙{0}相关的VPC未处于运行状态 +can\ not\ delete\ ruleSet[%s]\ because\ it\ still\ attached\ to\ nic = 无法删除规则集[{0}],因为它仍连接到NIC +default\ ruleset\ %s\ can\ only\ attached\ to\ one\ interface\ forward,\ but\ find\ %s\ related\ interface = 默认规则集{0}只能转发到一个接口,但找到{1}个相关接口 +cannot\ find\ vpcFirewall[uuid\:%s]\ related\ vRouter = 找不到与vpcFirewall[uuid:{0}]相关的虚拟路由器 + +# In Module: xdragon +xdragon\ host\ not\ support\ create\ vm\ using\ an\ iso\ image. = 神龙服务器不支持使用ISO镜像创建虚拟机。 + +# In Module: yunshan +the\ url\ is\ null,\ please\ config\ the\ YunShan\ NSP. = URL为空,请配置云山NSP。 + +# In Module: zboxbackup +please\ insert\ zbox\ to\ management\ node. = 请将ZBOX插入管理节点。 +some\ volume[uuids\:%s]\ recover\ failed.\ you\ can\ trigger\ it\ again\ by\ reconnect\ it.
= 某些卷[uuid:{0}]恢复失败。您可以通过重新连接来再次触发它。 +there\ is\ another\ external\ backup[uuid\:\ %s]\ recovering = 另一个外部备份[uuid:{0}]正在恢复 +both\ hostUuids\ and\ backupStorageUuids\ are\ empty.\ you\ must\ specify\ one\ or\ both\ of\ them. = hostUuids 和 backupStorageUuids 均为空。您必须指定其中一个或两个。 +cannot\ find\ recover.conf\ under\ zbox\ backup\ install\ dir. = 在ZBOX备份安装目录下找不到recover.conf。 +zbox\ should\ be\ inserted\ to\ a\ host\ first. = 应首先将ZBox插入主机。 +fail\ to\ backup\ database = 无法备份数据库 + +# In Module: zops-plugin +failed\ to\ config\ time\ sources\ '%s'\ in\ %s,\ because\:%s,\ raw\:\ %s = +%s\ is\ unreachable\ from\ %s,\ because\:%s = +failed\ to\ check\ is\ %s\ reachable\ from\ %s,\ because\:%s = +%s\ failed\ to\ check\ ceph\ health\ status,\ because\:\ %s = +failed\ to\ get\ chrony\ sources,\ because\:%s = +failed\ to\ get\ %s's\ chrony\ sources,\ because\:%s = +failed\ to\ synchronize\ chrony\ server\ in\ %s,\ because\:%s,\ raw\:\ %s = +fail\ to\ check\ is\ %s\ reachable\ from\ host\ %s,\ because\ %s\ is\ not\ managed\ by\ us = +fail\ to\ delete\ old\ chrony\ server\ in\ zstack.properties\ in\ %s,\ because\:%s = +fail\ to\ config\ chrony\ %s\ server\ in\ zstack.properties\ in\ %s,\ because\:%s = +%s\ is\ not\ a\ valid\ ip\ address = +internal\ and\ external\ chrony\ servers\ cannot\ be\ null\ at\ the\ same\ time = +%s\ is\ not\ a\ valid\ ip\ address\ or\ domain\ name = +%s\ cannot\ be\ set\ as\ external\ chrony\ server! = +%s\ is\ unreachable\ from\ %s = +ZStone\ not\ support\ update\ chrony\ server\ online\ yet!
= +ceph\ status\ is\ unhealthy,\ please\ check\ your\ environment\ first!\ %s = + +# In Module: vxlan +cannot\ configure\ vxlan\ network\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] = 无法为云主机[uuid:{0}]在目标主机[uuid:{1}]上配置VXLAN网络 +cannot\ allocate\ vni[%s]\ in\ l2Network[uuid\:%s],\ out\ of\ vni\ range = +cannot\ allocate\ vni[%s]\ in\ l2Network[uuid\:%s],\ duplicate\ with\ l2Network[uuid\:%s] = +find\ multiple\ vtep\ ips[%s]\ for\ one\ host[uuid\:%s],\ need\ to\ delete\ host\ and\ add\ again = 在一个主机[uuid:{1}]上发现多个VTEP IP[{0}],需要删除主机再进行添加 +failed\ to\ find\ vtep\ on\ host[uuid\:\ %s],\ please\ re-attach\ vxlanpool[uuid\:\ %s]\ to\ cluster. = 无法在主机[uuid:{0}]上找到VTEP,请将vxlanpool[uuid:{1}]重新挂接到集群。 +failed\ to\ create\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vni\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = 为二层网络[uuid:{1}, type:{2}, vni:{3}]在KVM主机[uuid:{4}]上创建网桥[{0}]失败,错误细节: {5} +failed\ to\ check\ cidr[%s]\ for\ l2VxlanNetwork[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = 为KVM主机[uuid:{3}]上的L2 VXLAN 网络[uuid:{1}, name:{2}]检查CIDR[{0}]失败,错误细节: {4} +failed\ to\ delete\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vni\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = 无法删除KVM主机[uuid:{4}]上的二层网络[uuid:{1},类型:{2},VNI:{3}]的网桥[{0}],因为{5} +failed\ to\ check\ cidr[%s]\ for\ l2VxlanNetworkPool[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = 检查在kvm主机[uuid:{3}]上的l2VxlanNetworkPool[uuid:{1}, name:{2}]的CIDR[{0}]失败,{4} +failed\ to\ realize\ vxlan\ network\ pool[uuid\:%s,\ type\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = 无法在KVM物理机[uuid:{2}]上实现VXLAN网络池[uuid:{0},类型:{1}],因为{3} +vni[%s]\ for\ vxlan\ is\ invalid = +cannot\ allocate\ vni[%s]\ in\ vxlan\ network[uuid\:%s]\ which\ is\ already\ allocated = +need\ to\ input\ one\ system\ tag\ like\ \:\ [%s] = 需要输入一个系统标签,格式为:[{0}] +wrong\ system\ tag\ [%s],\ should\ be\ like\ \:\ [%s] = 错误的系统标签[{0}],格式应该为:[{1}] +wrong\ cidr\ format\ in\ system\ tag\ [%s] = 系统标签[{0}]中的cidr格式错误 +overlap\ vni\ range\ with\ %s\ [%s] = 
与{0}[{1}]的vni范围重叠 +vxlan\ network\ pool\ doesn't\ support\ create\ l3\ network = vxlan network pool不支持创建三层网络 +Vni\ allocator\ strategy[%s]\ returns\ nothing,\ because\ no\ vni\ is\ available\ in\ this\ VxlanNetwork[name\:%s,\ uuid\:%s] = +Cannot\ find\ L2NetworkClusterRefVO\ item\ for\ l2NetworkUuid[%s]\ clusterUuid[%s] = +ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ exist = +ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ ip\ exist\ in\ local\ vtep = +%s\:is\ not\ ipv4 = +vxlan\ vtep\ address\ for\ host\ [uuid\ \:\ %s]\ and\ pool\ [uuid\ \:\ %s]\ pair\ already\ existed = 物理机[uuid : {0}]在vxlan资源池[uuid : {1}]中隧道端点地址已经配置 + +# In Module: zsv +cannot\ delete\ disaster\ recovery\ license = +invalid\ ip\ format\ [%s] = +volume\ %s\ still\ have\ snapshot\ group\ on\ vm\ %s,\ cannot\ attach\ to\ other\ vm = 卷{0}在云主机{1}上仍具有快照组,无法连接到其他云主机 +volume\ %s\ still\ have\ snapshot\ group,\ cannot\ delete\ it = 卷{0}仍具有快照组,无法将其删除 +detach\ sharable\ volume\ or\ lun\ device\ before\ operating\ snapshot\ group = +failed\ to\ find\ ZSV\ additional\ license\ info\:\ %s = +Failed\ to\ check\ SSH\ keys\ on\ host[%s\:%d] = +SSH\ keys\ are\ incomplete\ on\ host[%s\:%d]. = +Failed\ to\ generate\ SSH\ keys\ on\ host[%s\:%d] = +failed\ to\ check\ if\ management\ node\ is\ also\ compute\ node,\ node\:\ %s = +management\ node[uuid\:%s]\ must\ be\ a\ compute\ node = +failed\ to\ retrieve\ host\ uuid\ [management\ uuid\:%s] = +failed\ to\ check\ if\ management\ node\ is\ also\ compute\ node = # In Module: volumebackup bandWidth\ must\ be\ a\ positive\ number = 带宽必须为正数 @@ -3644,6 +4484,7 @@ original\ volume[uuid\:%s]\ for\ backup[uuid\:%s]\ is\ no\ longer\ attached\ to\ VM\ not\ found\ with\ volume\ backup[uuid\:%s] = 找不到和云盘备份[uuid:{0}]对应的虚拟机 VM\ is\ not\ in\ stopped\ state\:\ %s = 当前虚拟机状态并不是停止状态:{0} No\ available\ backup\ storage\ found = 没有可用的镜像服务器 +The\ vm\ is\ creating\ a\ backup\ job,\ cannot\ enable\ the\ cdp\ task\ at\ the\ same\ time. 
= 虚拟机正在创建备份任务,无法同时启用 CDP 任务 cannot\ find\ volume\ backup[uuid\:%s] = the\ cluster\ of\ vm[%s]\ is\ not\ in\ the\ same\ cluster\ as\ the\ primaryStorage[%s] = Operation\ not\ supported\ on\ shared\ volume = 共享云盘不支持该操作 @@ -3664,6 +4505,7 @@ Volume\ backup[uuid\:%s]\ not\ found\ on\ any\ backup\ storage = 未在任何备 degree\ [%s]\ should\ be\ a\ positive\ number = 度[{0}]应为正数 invalid\ type[%s],\ should\ be\ [nfs,\ sshfs,\ nbd] = 类型[{0}]无效,应为[NFS,sshfs,NBD] invalid\ url[%s],\ should\ be\ hostname\:/path = URL[{0}]无效,应为hostname:/path +generate\ volume\ backup\ metadata\ file\ on\ image\ store[uuid\:%s]\ failure,\ because\ IO\ error\:\ %s = 在镜像存储[uuid:{0}] 上生成卷备份元数据文件出现 IO 错误: {1} volume\ backup\ metadata\ operation\ failure,\ because\ %s = 卷备份元数据操作失败,原因是{0} # In Module: vpc @@ -3677,6 +4519,7 @@ L3Network\ [uuid\:\ %s]\ has\ not\ been\ attached\ to\ vpc\ router = 三层网 all\ networks\ in\ same\ IPsecConnection\ must\ be\ attached\ to\ same\ VPC\ router = 在相同的IPsec连接中的所有网络必须绑定在相同的VPC路由 there\ is\ no\ master\ vpc\ for\ ha\ group\ %s = 高可用性组{0}没有主VPC there\ is\ a\ vpc[%s]\ using\ old\ ipsec\ plugin,\ upgrade\ it\ to\ create\ ipsec = 存在使用旧IPSec插件的VPC[{0}],请升级该插件以创建IPSec +there\ already\ have\ ipsec\ connection[uuid\:%s,\ name\:%s]\ with\ the\ same\ vrouter\ and\ peerAddress = the\ vip[uuid\:%s]\ has\ been\ used\ for\ %s = 虚拟IP[uuid:{0}]已经用作网络服务 {1} the\ peerAddress[%s]\ cannot\ be\ the\ same\ to\ the\ VIP\ address = 对端地址[{0}]不能和虚拟IP地址相同 the\ peerAddress[%s]\ is\ not\ an\ IPv4\ address = 对端地址[{0}]不是一个IPv4地址 @@ -3709,6 +4552,8 @@ default\ route\ network\ can\ not\ be\ detached = 无法分离默认路由网络 original\ public\ network\ can\ not\ be\ detached = 原有公网不能脱离 could\ not\ detach\ l3\ network\ to\ vpc\ router[uuid\:%s]\ because\ its\ state\ is\ not\ running\ or\ stopped = 无法将三层网络与VPC路由器[uuid:{0}]分离,因为其状态未运行或已停止 could\ not\ detach\ l3\ network\ to\ vpc\ router[uuid\:%s]\ becaus\ the\ states\ of\ the\ master\ and\ slave\ are\ inconsistent = 无法将三层网络与VPC路由器[uuid:{0}]分离,因为主设备和从设备的状态不一致 +l3\ 
network[uuid\:%s]\ can\ not\ detach\ from\ vpc\ vrouter[uuid\:%s]\ since\ network\ services\ attached\ vips[%s]\ still\ used\ in\ l3 = +vpc\ l3\ network[uuid\:%s]\ can\ not\ detach\ from\ vpc\ vrouter[uuid\:%s]\ since\ vm\ nics[%s]\ still\ used\ in\ l3 = virtual\ router\ offering[uuid\:\ %s]\ is\ not\ enabled = 云路由规格[uuid: {0}]是不可用的 only\ vpc\ l3\ network\ can\ attach\ to\ vpc\ vrouter = 只有VPC三层网络可以绑定到VPC云路由 Vpc\ network\ [uuid\:%s]\ already\ attached\ to\ vpc\ router\ [uuid\:%s] = VPC网络[uuid:{0}]已连接到VPC路由器[uuid:{1}] @@ -3717,6 +4562,8 @@ could\ not\ attached\ l3\ network\ to\ vpc\ router[uuid\:%s]\ because\ its\ stat could\ not\ attached\ l3\ network\ to\ vpc\ router[uuid\:%s]\ because\ both\ its\ state\ and\ it\ peer\ state\ is\ not\ running\ or\ stopped = 无法将三层网络连接到VPC路由器[uuid:{0}],因为其状态和对等状态均未运行或已停止 public\ network[uuid\:\ %s]\ vip[uuid\:\ %s,\ ip\:\ %s]\ peer\ with\ l3network[uuid\:\ %s]\ not\ on\ vpc\ vr[uuid\:\ %s] = 在VPC云路由[uuid: {4}]上,三层网络[uuid: {3}]没有和公有网络[uuid: {0}]虚拟IP[uuid: {1}, ip: {2}]同阶 the\ gateway[ip\:%s]\ of\ l3[uuid\:%s]\ has\ been\ occupied = 三层网络[uuid:{1}]的网关[uuid:{0}]已经被占用 +the\ static\ ip[%s]\ specified\ in\ message\ not\ equals\ to\ gateway\ ips[%s]\ of\ l3\ network[uuid\:%s] = +l3\ network\ [uuid\:%s]\ must\ be\ attached\ first,\ because\ there\ is\ vip\ on\ that\ l3\ network = dns\ address\ [%s]\ is\ not\ added\ to\ vpc\ router\ [uuid\:%s] = dns地址[{0}]未添加到vpc路由[uuid:{1}] could\ not\ add\ ip\ range\ to\ l3\ network[uuid\:%s],\ because\ it's\ overlap\ with\ cidr\ [%s]\ of\ vRouter\ [uuid\:%s] = 无法将IP范围添加到三层网络[uuid:{0}],因为它与VRouter[uuid:{2}]的CIDR[{1}]重叠 could\ not\ add\ ipv6\ range\ to\ l3\ network[uuid\:%s],\ because\ it's\ overlap\ with\ cidr\ [%s]\ of\ vRouter\ [uuid\:%s] = 无法将IPv6范围添加到三层网络[uuid:{0}],因为它与VRouter[uuid:{2}]的CIDR[{1}]重叠 @@ -3726,11 +4573,13 @@ not\ support\ to\ get\ the\ service\ %s\ state\ to\ virtual\ router\ %s = 路由 can\ not\ get\ state\ of\ distributed\ routing\ to\ virtual\ router\ %s = 获取路由器 {0} 分布式路由的状态失败 not\ 
support\ to\ update\ the\ service\ %s\ state\ to\ virtual\ router\ %s = 路由器 {1} 不支持更新网络功能{0} virtual\ router\ offering[uuid\:%s,\ name\:%s]\ doesn't\ have\ a\ public\ network = +vpc\ l3\ network\ must\ attach\ a\ vpc\ vrouter\ first\ before\ do\ anything\ related\ to\ vrouter(like\ start/stop\ vm,\ create\ lb,\ etc.) = dns\ address\ [%s]\ has\ bean\ added\ to\ vpc\ router\ [uuid\:%s] = 在路由器[uuid:{1}]上已经存在一个DNS[{0}] can\ not\ detach\ nic\ from\ vpc\ vr[uuid\:%s] = 不能从VPC云路由[uuid:{0}]解绑网卡 there\ is\ no\ ip\ range\ for\ l3\ network[uuid\:%s] = 三层网络[uuid:{0}]没有IP范围 the\ gateway[ip\:%s]\ of\ l3[uuid\:%s]\ has\ been\ occupied\ on\ vpc\ vr[uuid\:\ %s] = 在VPC的云路由[uuid: {2}]上,三层网络[uuid:{1}]的网关[uuid:{0}]已经被占用 unable\ to\ ssh\ in\ to\ the\ vpc\ router[%s],\ the\ ssh\ port\ seems\ not\ open = 无法通过SSH连接到VPC路由器[{0}],SSH端口似乎未打开 +the\ SSH\ port\ is\ not\ open\ after\ %s\ seconds.\ Failed\ to\ login\ the\ vpc\ router[ip\:%s] = Could\ not\ update\ this\ network\ service,\ due\ to\ vpc\ [uuid\:%s]\ is\ not\ support\ update\ network\ service\ version = 无法更新此网络服务,因为VPC[uuid:{0}]不支持更新网络服务版本 Could\ not\ update\ this\ network\ service,\ due\ to\ vpc\ [uuid\:%s]\ used\ old\ kernel\ version\:[%s] = 无法更新此网络服务,因为VPC[uuid:{0}]使用了旧内核版本:[{1}] Could\ not\ apply\ snat\ with\ non-default\ public\ network,\ due\ to\ multi\ snat\ feature\ is\ disabled = 无法使用非默认公用网络应用SNAT,因为多SNAT功能已禁用 @@ -3740,9 +4589,11 @@ invalid\ monitor\ ip\ address\ [%s] = 仲裁地址[{0}]错误 vpcHaRouter\ [uuid\:%s]\ is\ deleted = 高可用组[uuid:{0}]被删除了 there\ are\ more\ than\ 2\ vpc\ routers\ attached\ to\ haGroup\ [uuid\:%s] = 高可用组[uuid:{0}]的路由器数量已经超过2 ha\ group\ management\ l3\ and\ public\ l3\ networks[uuid\:%s]\ are\ different\ from\ offering\ l3\ networks\ [uuid\:%s] = 高可用组的管理网,公网组合[uuid:{0}]和云路由规格的三层网络[uuid:{1}]不同 +vpc\ router\ l3\ networks\ [uuid\:%s]\ are\ different\ from\ ha\ group\ l3\ networks\ [uuid\:%s],\ !!!\ please\ delete\ this\ router\ and\ recreate\ it = vpc\ router\ has\ been\ attached\ to\ ha\ group\ [uuid\:%s] = 
vpc路由器不在高可用组[uuid:{0}]中 vpc\ ha\ group\ [uuid\:%s]\ is\ not\ existed = 高可用组[uuid:{0}]不存在 there\ are\ more\ than\ 1\ vpc\ routers\ attached\ to\ haGroup\ [uuid\:%s] = 高可用组[uuid:{0}]的路由器数量已经超过1 +vpc\ router\ [uuid\:%s]\ can\ not\ be\ upgraded\ to\ ha\ router\ because\ it\ public\ network\ is\ same\ to\ management\ network = create\ affinityGroup\ for\ ha\ group\ [uuid\:%s]\ failed = 高可用组[uuid:{0}]创建亲和组失败 virtualrouter\ %s\ [uuid\:\ %s\ ]\ of\ VPC\ HA\ group\ %s\ [uuid\:\ %s]\ haStatus\ changed\ from\ %s\ to\ %s = VPC高可用性组{2}[uuid:{3}]的VirtualRouter{0}[uuid:{1}]的高可用性状态已从{4}更改为{5} ha\ group\ uuid\ nil = 高可用性组uuid无 @@ -3750,113 +4601,20 @@ VR[uuid\:\ %s]\ not\ running = VR[uuid:{0}]未运行 VR[uuid\:\ %s]\ not\ connected = VR[uuid:{0}]未连接 failed\ to\ enable\ ha\ on\ virtual\ router[uuid\:%s],\ %s = 路由器[uuid:{0}]打开高可用功能失败,{1} -# In Module: vpcFirewall -can\ not\ detach\ system\ default\ ruleSet = 无法分离系统默认规则集 -only\ system\ ruleSet\ can\ change\ action\ type = 只有系统规则集才能更改操作类型 -can\ not\ delete\ system\ default\ ruleSet = 无法删除系统默认规则集 -can\ not\ delete\ system\ default\ rule = 无法删除系统默认规则 -the\ router\ [uuid\:%s]\ does\ not\ has\ a\ master\ router = 路由器[uuid:{0}]没有主路由器 -the\ VPC\ Router[uuid\:%s]\ already\ has\ a\ firewall. = VPC路由器[uuid:{0}]已有防火墙。 -already\ has\ a\ rule\ template\ with\ name\ %s = 已有名为{0}的规则模板 -the\ ruleSet[%s]\ already\ has\ a\ rule\ with\ rule\ number\ %s. = 规则集[{0}]已具有规则编号为{1}的规则。 -can\ not\ update\ default\ rule[%s] = 无法更新默认规则[{0}] -only\ tcp\ protocol\ can\ use\ tcp\ flag = 只有TCP协议才能使用TCP标志 -only\ icmp\ protocol\ can\ use\ icmp\ type = 只有ICMP协议才能使用ICMP类型 -the\ rule\ [%s]\ number\ is\ invalid = 规则[{0}]编号无效 -can\ not\ attach\ the\ default\ ruleSet\ to\ other\ nic = 无法将默认规则集附加到其他NIC -ruleSet[%s]\ already\ has\ a\ l3[%s] = 规则集[{0}]已具有L3[{1}] -already\ has\ a\ rule\ with\ the\ number[%s] = 已具有编号为[{0}]的规则 -the\ ruleSet[%s]\ already\ has\ a\ rule\ with\ the\ rule\ number\ %s. 
= 规则集[{0}]已具有规则编号为{1}的规则。 -could\ not\ add\ firewall\ rule[%d]\ only\ tcp\ or\ udp\ protocol\ can\ use\ port = 无法添加防火墙规则[{0}]只有TCP或UDP协议可以使用端口 -could\ not\ add\ firewall\ rule[%d]\ only\ tcp\ protocol\ can\ use\ tcp\ flag = 无法添加防火墙规则[{0}]只有TCP协议可以使用TCP标志 -could\ not\ add\ firewall\ rule[%d]\ because\ only\ icmp\ protocol\ can\ use\ icmp\ type = 无法添加防火墙规则[{0}],因为只有ICMP协议可以使用ICMP类型 -could\ not\ add\ firewall\ rule[%d]\ because\ only\ tcp\ or\ udp\ protocol\ can\ use\ port = 无法添加防火墙规则[{0}],因为只有TCP或UDP协议可以使用端口 -could\ not\ add\ firewall\ rule[%d]\ because\ only\ tcp\ protocol\ can\ use\ tcp\ flag = 无法添加防火墙规则[{0}],因为只有TCP协议可以使用TCP标志 -could\ not\ add\ firewall\ rule[%d]\ because\ %s = 无法添加防火墙规则[{0}],因为{1} -could\ not\ add\ firewall\ rule,\ because\ ruleNo\ %d\ is\ invalid = 无法添加防火墙规则,因为RuleNo{0}无效 -could\ not\ add\ firewall\ rule,\ because\ there\ is\ no\ action\ for\ ruleNo\:%d = 无法添加防火墙规则,因为没有针对RuleNo的操作:{0} -could\ not\ add\ firewall\ rule,\ because\ source\ IP\ length\:\ %s\ is\ not\ valid\ for\ ruleNo\:%d = 无法添加防火墙规则,因为源IP长度{0}对RuleNo{1}无效 -could\ not\ add\ firewall\ rule,\ because\ destination\ IP\ length\:\ %s\ is\ not\ valid\ for\ ruleNo\:%d = 无法添加防火墙规则,因为目标IP长度{0}对RuleNo{1}无效 -could\ not\ add\ firewall\ rule,\ because\ there\ is\ no\ state\ for\ ruleNo\:%d = 无法添加防火墙规则,因为RuleNo没有状态:{0} -could\ not\ add\ firewall\ rule,\ because\ description\ length\ %s\ is\ not\ valid\ for\ ruleNo\:%d = 无法添加防火墙规则,因为描述长度{0}对RuleNo无效:{1} -the\ configuration\ file\ has\ format\ error = 配置文件有格式错误 -the\ firewall\ rules\ in\ the\ configuration\ file\ have\ syntax\ errors\:\ %s = 配置文件中的防火墙规则有语法错误:{0} -sync\ firewall\ config\ failed,because\ %s = 同步防火墙配置失败,因为{0} -update\ firewall\ ruleSet\ action\ failed,\ because\ %s = 更新防火墙规则集操作失败,因为{0} -Can\ not\ find\ l3[%]\ related\ mac\ on\ vRouter[%s] = 在VRouter[{0}]上找不到与L3[%]相关的MAC -create\ firewall\ rule[%s]\ failed,\ because\ %s = 创建防火墙规则[{0}]失败,原因是{1} -delete\ firewall\ on\ vRouter[%s],because\ %s = 删除VRouter[{0}]上的防火墙,因为{1} -create\ firewall\ 
ruleSet[%s]\ failed,\ because\ %s = 创建防火墙规则集[{0}]失败,原因是{1} -delete\ firewall\ rule\ failed\ on\ vRouter[%s],\ because\ %s = 在VRouter[{0}]上删除防火墙规则失败,因为{1} -change\ firewall\ rule\ state\ on\ vRouter[%s]\ failed,\ because\ %s = 更改VRouter[{0}]上的防火墙规则状态失败,原因是{1} -attach\ firewall\ ruleSet[%s]\ failed,\ because\ %s = 附加防火墙规则集[{0}]失败,原因是{1} -detach\ ruleSet\ failed,\ maybe\ it\ has\ been\ deleted = 分离规则集失败,它可能已被删除 -detach\ firewall\ ruleSet[%s]\ failed,because\ %s = 分离防火墙规则集[{0}]失败,原因是{1} -cannot\ find\ vpcFirewall[uuid\:%s],\ it\ may\ have\ been\ deleted = 找不到vpcFirewall[uuid:{0}],它可能已被删除 -cannot\ find\ vpcFirewallRuleSet[uuid\:%s],\ it\ may\ have\ been\ deleted = 找不到VpcFirewallRuleSet[uuid:{0}],它可能已被删除 -cannot\ find\ vpcFirewallIpSetTemplate[uuid\:%s],\ it\ may\ have\ been\ deleted = 找不到VpcFireWallipSetTemplate[uuid:{0}],它可能已被删除 -attach\ firewall\ ruleSet[%s]\ to\ l3[%s]\ failed,because\ %s = 将防火墙规则集[{0}]附加到L3[{1}]失败,原因是{2} -detach\ firewall\ ruleSet\ from\ l3[%s]\ failed,because\ %s = 从L3[{0}]分离防火墙规则集失败,原因是{1} -find\ duplicate\ rule\ numbers\ %s\ on\ firewall[%s],l3[%s],forward[%s] = 在防火墙[{1}]、L3[{2}]、转发[{3}]上查找重复的规则编号{0} -no\ changes\ in\ ruleset\ %s = 规则集{0}中没有更改 -firewall\ %s\ related\ vpc\ not\ in\ running\ state = 防火墙{0}相关的VPC未处于运行状态 -can\ not\ delete\ ruleSet[%s]\ because\ it\ still\ attached\ to\ nic = 无法删除规则集[{0}],因为它仍连接到NIC -default\ ruleset\ %s\ can\ only\ attached\ to\ one\ interface\ forward,\ but\ find\ %s\ related\ interface = 默认规则集{0}只能转发到一个接口,但找到{1}个相关接口 -cannot\ find\ vpcFirewall[uuid\:%s]\ related\ vRouter = 找不到与vpcFirewall[uuid:{0}]相关的虚拟路由器 - -# In Module: vxlan -cannot\ configure\ vxlan\ network\ for\ vm[uuid\:%s]\ on\ the\ destination\ host[uuid\:%s] = 无法为云主机[uuid:{0}]在目标主机[uuid:{1}]上配置VXLAN网络 -cannot\ allocate\ vni[%s]\ in\ l2Network[uuid\:%s],\ out\ of\ vni\ range = -cannot\ allocate\ vni[%s]\ in\ l2Network[uuid\:%s],\ duplicate\ with\ l2Network[uuid\:%s] = -find\ multiple\ vtep\ ips[%s]\ for\ one\ host[uuid\:%s],\ need\ to\ delete\ host\ and\ 
add\ again = 在一个主机[uuid:{1}]发现多个VTEP IP,需要删除主机在进行添加 -failed\ to\ find\ vtep\ on\ host[uuid\:\ %s],\ please\ re-attach\ vxlanpool[uuid\:\ %s]\ to\ cluster. = 无法在主机[uuid:{0}]上找到VTEP,请将vxlanpool[uuid:{1}]重新挂接到集群。 -failed\ to\ create\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vni\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = 为二层网络[uuid:{1}, type:{2}, vni:{3}]在KVM主机[uuid:{4}]上创建网桥[{0}]失败,错误细节: {5} -failed\ to\ check\ cidr[%s]\ for\ l2VxlanNetwork[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = 为KVM主机[uuid:{3}]上的L2 VXLAN 网络[uuid:{1}, name:{2}]检查CIDR[{0}]失败,错误细节: {4} -failed\ to\ delete\ bridge[%s]\ for\ l2Network[uuid\:%s,\ type\:%s,\ vni\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = 无法删除KVM主机[uuid:{4}]上的二层网络[uuid:{1},类型:{2},VNI:{3}]的网桥[{0}],因为{5} -failed\ to\ check\ cidr[%s]\ for\ l2VxlanNetworkPool[uuid\:%s,\ name\:%s]\ on\ kvm\ host[uuid\:%s],\ %s = 检查在kvm主机[uuid:{3}]上的l2VxlanNetworkPool[uuid:{1}, name:{2}]的CIDR[{0}]失败,{4} -failed\ to\ realize\ vxlan\ network\ pool[uuid\:%s,\ type\:%s]\ on\ kvm\ host[uuid\:%s],\ because\ %s = 无法在KVM物理机[uuid:{2}]上实现VXLAN网络池[uuid:{0},类型:{1}],因为{3} -vni[%s]\ for\ vxlan\ is\ invalid = -cannot\ allocate\ vni[%s]\ in\ vxlan\ network[uuid\:%s]\ which\ is\ already\ allocated = -need\ to\ input\ one\ system\ tag\ like\ \:\ [%s] = 需要输入一个系统标签,格式为:[{0}] -wrong\ system\ tag\ [%s],\ should\ be\ like\ \:\ [%s] = 错误的系统标签[{0}],格式应该为:[{1}] -wrong\ cidr\ format\ in\ system\ tag\ [%s] = 系统标签[{0}]中的cidr格式错误 -overlap\ vni\ range\ with\ %s\ [%s] = 与{0}[{1}]的vni范围重叠 -vxlan\ network\ pool\ doesn't\ support\ create\ l3\ network = vxlan network pool不支持创建三层网络 -Vni\ allocator\ strategy[%s]\ returns\ nothing,\ because\ no\ vni\ is\ available\ in\ this\ VxlanNetwork[name\:%s,\ uuid\:%s] = -Cannot\ find\ L2NetworkClusterRefVO\ item\ for\ l2NetworkUuid[%s]\ clusterUuid[%s] = -ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ exist = -ip[%s]\ l2NetworkUuid[%s]\ clusterUuid[%s]\ ip\ exist\ in\ local\ vtep = -%s\:is\ not\ ipv4 = -vxlan\ vtep\ address\ for\ host\ 
[uuid\ \:\ %s]\ and\ pool\ [uuid\ \:\ %s]\ pair\ already\ existed = 物理机[uuid : {0}]在vxlan资源池[uuid : {1}]中隧道端点地址已经配置 - -# In Module: woodpecker - -# In Module: xdragon -xdragon\ host\ not\ support\ create\ vm\ using\ an\ iso\ image. = 神龙服务器不支持使用ISO镜像创建虚拟机。 - -# In Module: yunshan -the\ url\ is\ null,\ please\ config\ the\ YunShan\ NSP. = URL为空,请配置云山NSP。 - # In Module: zbox +usb\ device[uuid\:%s]\ has\ been\ attached\ VM[uuid\:%s],\ cannot\ be\ add\ to\ zbox = zbox[name\:%s]\ status\ is\ not\ Ready,\ current\ status\ is\ %s = ZBox[名称:{0}]状态未就绪,当前状态为{1} zbox[uuid\:%s]\ is\ still\ in\ use,\ cannot\ eject\ it = ZBox[uuid:{0}]仍在使用,无法将其弹出 zbox[uuid\:%s]\ is\ not\ Ready,\ cannot\ sync\ capacity. = ZBox[uuid:{0}]未就绪,无法同步容量。 only\ file\ on\ zbox[mountPath\:%s]\ can\ be\ deleted.\ but\ pass\ [%s] = 只能删除ZBox[mountPath:{0}]上的文件。但传递[{1}] zbox[name\:%s]\ state\ is\ not\ Ready,\ current\ state\ is\ %s = ZBox[名称:{0}]状态未就绪,当前状态为{1} zbox[uuid\:\ %s]\ seems\ like\ removed = ZBox[uuid:{0}]似乎已删除 - -# In Module: zboxbackup -please\ insert\ zbox\ to\ management\ node. = 请将ZBOX插入管理节点。 -some\ volume[uuids\:%s]\ recover\ failed.\ you\ can\ trigger\ it\ again\ by\ reconnect\ it. = 某些卷[uuid:{0}]恢复失败。您可以通过重新连接来再次触发它。 -there\ is\ another\ external\ backup[uuid\:\ %s]\ recovering = 另一个外部备份[uuid:{0}]正在恢复 -both\ hostUuids\ and\ backupStorageUuids\ are\ empty.\ you\ must\ specify\ one\ or\ both\ of\ them. = Hostuuid和BackupStorageuuid均为空。您必须指定其中一个或两个。 -cannot\ find\ recover.conf\ under\ zbox\ backup\ install\ dir. = 在ZBOX备份安装目录下找不到recover.conf。 -zbox\ should\ be\ inserted\ to\ a\ host\ first. = 应首先将ZBox插入主机。 -fail\ to\ backup\ database = 无法备份数据库 +please\ attach\ zbox\ to\ %s[uuid\:%s]\ and\ resume\ job.\ if\ you\ do\ not\ want\ to\ continue,\ cancel\ it. 
= # In Module: zbs failed\ to\ SSH\ or\ zbs-tools\ was\ not\ installed\ in\ MDS[%s],\ you\ need\ to\ check\ the\ SSH\ configuration\ and\ dependencies = failed\ to\ get\ MDS[%s]\ metadata,\ you\ need\ to\ check\ the\ ZBS\ configuration = +unable\ to\ connect\ to\ the\ ZBS\ primary\ storage[uuid\:%s],\ failed\ to\ connect\ all\ MDS = 无法连接 ZBS 主存储 [uuid:{0}]: 不能连接所有的 MDS ZBS\ primary\ storage[uuid\:%s]\ may\ have\ been\ deleted = cannot\ found\ kvm\ host[uuid\:%s],\ unable\ to\ deploy\ client = no\ MDS\ is\ Connected,\ the\ following\ MDS[%s]\ are\ not\ Connected. = @@ -3868,20 +4626,107 @@ not\ found\ MDS[%s]\ of\ zbs\ primary\ storage[uuid\:%s]\ node = all\ MDS\ of\ ZBS\ primary\ storage[uuid\:%s]\ are\ not\ in\ Connected\ state = all\ MDS\ cannot\ execute\ http\ call[%s] = -# In Module: zsv -cannot\ delete\ disaster\ recovery\ license = -invalid\ ip\ format\ [%s] = -volume\ %s\ still\ have\ snapshot\ group\ on\ vm\ %s,\ cannot\ attach\ to\ other\ vm = 卷{0}在云主机{1}上仍具有快照组,无法连接到其他云主机 -volume\ %s\ still\ have\ snapshot\ group,\ cannot\ delete\ it = 卷{0}仍具有快照组,无法将其删除 -detach\ sharable\ volume\ or\ lun\ device\ before\ operating\ snapshot\ group = -failed\ to\ find\ ZSV\ additional\ license\ info\:\ %s = -Failed\ to\ check\ SSH\ keys\ on\ host[%s\:%d] = -SSH\ keys\ are\ incomplete\ on\ host[%s\:%d]. 
= -Failed\ to\ generate\ SSH\ keys\ on\ host[%s\:%d] = -failed\ to\ check\ if\ management\ node\ is\ also\ compute\ node,\ node\:\ %s = -management\ node[uuid\:%s]\ must\ be\ a\ compute\ node = -failed\ to\ retrieve\ host\ uuid\ [management\ uuid\:%s] = -failed\ to\ check\ if\ management\ node\ is\ also\ compute\ node = +# In Module: zce-x-plugin +field[adminToken]\ of\ message[APIAddZceXMsg]\ is\ mandatory\ when\ ZSphere\ and\ ZCE-X\ management\ node\ is\ not\ on\ the\ same\ host = +field[managementIp]\ and\ field[uuid]\ can\ not\ be\ null\ on\ the\ same\ time = +field[managementIp]\ and\ field[uuid]\ can\ not\ be\ set\ on\ the\ same\ time = +field[otherManagementIp]\ of\ message[APIAddZceXMsg]\ is\ invalid\:\ ZceXVO.managementIp\ and\ otherManagementIp\ must\ be\ different = +field[otherStorageIp]\ of\ message[APIAddZceXMsg]\ is\ invalid\:\ ZceXVO.managementIp,\ otherManagementIp\ and\ otherStorageIp\ must\ be\ different = +invalid\ package\ status.\ only\ Installed,\ InitializeFailed\ or\ Initialized\ packages\ can\ be\ initialized = +failed\ to\ find\ active\ ceph\ cluster = +failed\ to\ connect\ to\ ZCE-X[%s] = +access-token\ is\ invalid = 访问令牌无效 +no\ management\ node\ is\ available\ to\ communicate\ with\ ZCE-X\:\ xms-cli\ is\ missing = +ZceXVO\ with\ management\ ip[%s]\ already\ exists = +failed\ to\ create\ alert\ platform\ from\ ZCE-X[uuid\=%s] = 创建 ZCE-X 告警平台失败 +cannot\ find\ management\ node\ by\ managementIp\ %s = +SSH\ passwordless\ setup\ failed\:\ %s = +failed\ to\ generate\ ZCE-X\ env\ config\ from\ template = 生成 ZCE-X 环境配置文件失败 +failed\ to\ generate\ admin\ token = 生成 ZCE-X 管理员令牌失败 +no\ token\ found\ with\ zceX[uuid\:%s] = +install.sh\ not\ found\:\ %s.\ install.sh\ has\ been\ manually\ deleted.\ please\ re-upload\ the\ installation\ package = 未找到 install.sh: {0}. 
install.sh 可能被手动删除, 需要重新上传安装包 +failed\ to\ install\ distributed\ storage = +cannot\ find\ management\ node\ %s = +cleanup.sh\ not\ found\:\ %s.\ the\ cleanup.sh\ has\ been\ manually\ deleted. = +init\ config\ not\ found\ for\ software\ package\:\ %s = +failed\ to\ generate\ conf = +failed\ to\ cleanup\ env = +failed\ to\ uninstall\ distributed\ storage\ on\ hosts = +failed\ to\ get\ cluster\ from\ ZCE-X = 获取 ZCE-X 集群失败 +no\ cluster\ exists\ in\ ZCE-X = +failed\ to\ get\ licenses\ from\ ZCE-X = 获取 ZCE-X 许可证失败 +failed\ to\ get\ hosts\ from\ ZCE-X = 获取 ZCE-X 主机失败 +failed\ to\ get\ pools\ from\ ZCE-X = 获取 ZCE-X 存储池失败 +failed\ to\ get\ version\ from\ ZCE-X = 获取 ZCE-X 版本失败 +failed\ to\ get\ users\ from\ ZCE-X = 获取 ZCE-X 用户失败 +invalid\ token\ for\ ZCE-X\ server = +failed\ to\ get\ ZCE-X\ version\:\ missing\ version\ file = +failed\ to\ get\ ZCE-X\ version\:\ read\ error = +failed\ to\ update\ expiration\ time\ with\ ZCE-X\ configuration\ file[%s] = +xms-cli\ env\ config\ file[%s]\ already\ exists.\ You\ must\ check\ and\ manually\ delete\ this\ file\ and\ trying\ again = +failed\ to\ write\ xms-cli\ env\ config\ file\ to\ temp\ file = +failed\ to\ write\ ZCE-X\ cluster\ config\ file = +xms-cli\ temporary\ admin\ token\ does\ not\ exist = +failed\ to\ read\ xms-cli\ temporary\ admin\ token\:\ %s = +failed\ to\ install\ ZCE-X\ cluster\ by\ %s = +xms-cli\ does\ not\ exist = +admin_token\ already\ exists.\ You\ must\ confirm\ that\ the\ current\ admin_token\ is\ no\ longer\ in\ use,\ delete\ the\ token\ by\ command\ 'xms-cli\ access-token\ delete',\ and\ try\ again = admin_token 已经存在。请确认当前的 admin_token 不再使用,并通过 ''xms-cli access-token delete'' 删除它,然后重试 +failed\ to\ login\ ZCE-X\ when\ creating\ access\ token = +failed\ to\ create\ ZCE-X\ access\ token = +xms-cli\ returns\ invalid\ access_token = xms-cli 返回的访问令牌无效 +failed\ to\ get\ license\ content = +failed\ to\ read\ license\ bytes\:\ %s = +failed\ to\ extract\ ZCE-X\ license\ content\ from\ tarball = +No\ enc\ license\ file\ 
(for\ ZCE-X\ server)\ found\ in\ tarball.\ Skip\ updating\ license\ and\ continue = +failed\ to\ write\ zce-x-license.tar.gz\:\ %s = +failed\ to\ login\ ZCE-X\ when\ updating\ license = +failed\ to\ update\ ZCE-X\ license = +failed\ to\ login\ ZCE-X\ when\ getting\ license\ content = +failed\ to\ list\ ZCE-X\ clusters = +access\ token\ is\ empty = +failed\ to\ create\ template\ file\:\ %s = +failed\ to\ get\ ZCE-X\ version = +ZCE-X\ third\ party\ alert\ platform\ is\ already\ existing = +ZCE-X\ token\ is\ not\ existing = +ZCE-X\ Storage = +failed\ to\ get\ ZCE-X\ license\:\ no\ ZCE-X\ found\ with\ uuid[%s] = +more\ than\ one\ ZCE-X\ found.\ You\ should\ specify\ the\ target\ ZCE-X\ for\ uploading\ the\ license\ by\ field\ monitorIp = +only\ one\ ZCE-X\ can\ be\ specified\ for\ uploading\ the\ license\ by\ field\ monitorIp = +failed\ to\ get\ ZCE-X\ license\:\ no\ ZCE-X\ found\ with\ ip[%s] = +failed\ to\ get\ ZCE-X\ license\:\ no\ token = + +# In Module: zstone-plugin +field[hosts]\ must\ not\ be\ null\ or\ empty = +at\ least\ 2\ hosts\ are\ required\ for\ ZStone\ initialization = +field[uuid]\ in\ hosts\ must\ not\ be\ null\ or\ empty = +duplicated\ host\ uuid(s)\ found\:\ %s = +field[uuid]\ of\ message[APIUpdateZStoneHostConfigMsg].hosts\ are\ invalid\:\ %s = +field[publicIp]\ of\ message[APIUpdateZStoneHostConfigMsg].hosts\ is\ mandatory,\ can\ not\ be\ null = +failed\ to\ connect\ to\ zstone[%s] = +ZStone\ with\ management\ ip[%s]\ already\ exists = +ZStone[%s]\ has\ %s\ block\ clusters,\ we\ must\ ensure\ no\ other\ clusters\ in\ ZStone\ before\ add\ new\ cluster = +all\ management\ nodes\ must\ have\ the\ same\ username\ and\ password = +all\ storage\ nodes\ must\ have\ the\ same\ username\ and\ password = +config\ parameter\ cannot\ be\ null\ or\ empty = +install\ config\ not\ found\ for\ software\ package\:\ %s = +failed\ to\ authorize\ in\ ZStone\ server = 无法登录 ZStone 服务器 +failed\ to\ get\ licenses\ from\ ZStone = 无法从 ZStone 获取许可证 +failed\ to\ get\ 
licenses\ from\ ZStone\:\ %s = +failed\ to\ reload\ licenses\ from\ ZStone = 无法从 ZStone 重新加载许可证 +failed\ to\ get\ clusters\ from\ ZStone\ 5.2.x\ /\ 5.3.x = +failed\ to\ get\ clusters\ from\ ZStone\ (version\ >\=\ 5.4.x) = +failed\ to\ get\ clusters\ from\ ZStone = 无法从 ZStone 获取集群信息 +failed\ to\ get\ host\ info\ from\ ZStone = 无法从 ZStone 获取主机信息 +failed\ to\ get\ pool\ info\ from\ ZStone = 无法从 ZStone 获取存储池信息 +failed\ to\ get\ session\ info\ from\ ZStone = 无法从 ZStone 获取会话信息 +failed\ to\ update\ cluster = 无法更新集群信息 +failed\ to\ add\ host = 无法添加主机 +failed\ to\ add\ ZStone\ hosts = +invalid\ session\ for\ ZStone\ server = +ZStone\ session\ expired = +failed\ to\ get\ api\ response\ for\ path[%s] = 无法获取 ZStone API 响应 +ZStone\ API\ failed = 无法调用 ZStone API +Failed\ to\ find\ ZStone\ with\ uuid\ [%s] = # In Module: zwatch unknown\ parameter[%s]\ in\ zwatch\ return\ with\ clause,\ %s = ZWatch Return WITH子句中的未知参数[{0}],{1} @@ -3967,6 +4812,7 @@ invalid\ argument[limit\:%s],\ it\ can't\ be\ a\ negative\ number = 无效的参 invalid\ argument[start\:%s],\ it\ can't\ be\ a\ negative\ number = 参数[开始:{0}]无效,它不能是负数 value[%s]\ is\ not\ a\ Integer\ number = 值(value)[{0}]不是一个整数 unknown\ argument[%s] = 未知参数[{0}] +there\ are\ multiple\ EventFamily\ with\ the\ name[%s],\ you\ must\ specify\ the\ label[%s] = 存在多个同名[{0}]的事件族,必须指定标签[{1}] invalid\ query\ label[%s].\ Allowed\ label\ names\ are\ %s = 无效的查询标签[{0}]。允许标签名是 {1} cannot\ find\ EventFamily[name\:%s,\ namespace\:%s] = 找不到事件族[name:{0}, namespace:{1}] cannot\ find\ EventFamily[name\:%s] = 找不到事件族[name:{0}] diff --git a/conf/i18n_json/i18n_aliyun-storage.json b/conf/i18n_json/i18n_aliyun-storage.json index be85c963bb8..cbc6ad06540 100644 --- a/conf/i18n_json/i18n_aliyun-storage.json +++ b/conf/i18n_json/i18n_aliyun-storage.json @@ -2,35 +2,35 @@ { "raw": "accessKey and keySecret must be set", "en_US": "accessKey and keySecret must be set", - "zh_CN": "", + "zh_CN": "必须设置 accessKey 和 keySecret", "arguments": [], "fileName": 
"src/main/java/org/zstack/aliyun/core/AliyunEbsClient.java" }, { "raw": "ocean api endpoint must not be null", "en_US": "ocean api endpoint must not be null", - "zh_CN": "", + "zh_CN": "ocean API 的 endpoint 不能为空", "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/core/AliyunEbsClient.java" }, { "raw": "accessKey and keySecret must be set!", "en_US": "accessKey and keySecret must be set!", - "zh_CN": "", + "zh_CN": "必须设置 accessKey 和 keySecret", "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/core/AliyunNasClient.java" }, { "raw": "regionId must be set!", "en_US": "regionId must be set!", - "zh_CN": "", + "zh_CN": "必须设置 regionId", "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/core/AliyunNasClient.java" }, { "raw": "no current used key/secret for %s!", "en_US": "no current used key/secret for {0}!", - "zh_CN": "", + "zh_CN": "没有当前使用的 {0} 密钥/密钥对", "arguments": [ "type.toString()" ], @@ -39,14 +39,14 @@ { "raw": "Not a valid message!", "en_US": "Not a valid message!", - "zh_CN": "", + "zh_CN": "不是有效的消息", "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/core/AliyunStorageSdkIml.java" }, { "raw": "%s failed, ErrorCode: %s, ErrorMessage: %s", "en_US": "{0} failed, ErrorCode: {1}, ErrorMessage: {2}", - "zh_CN": "", + "zh_CN": "{0} 失败,错误代码:{1},错误消息:{2}", "arguments": [ "action", "result.ErrorCode", @@ -57,7 +57,7 @@ { "raw": "Device Not Ready in %d milli seconds", "en_US": "Device Not Ready in {0} milli seconds", - "zh_CN": "", + "zh_CN": "设备在 {0} 毫秒内未就绪", "arguments": [ "15000" ], @@ -66,7 +66,7 @@ { "raw": "snapshot task cannot finished in %d milliseconds, now progress is %d, status is %s", "en_US": "snapshot task cannot finished in {0} milliseconds, now progress is {1}, status is {2}", - "zh_CN": "", + "zh_CN": "快照任务无法在 {0} 毫秒内完成,当前进度为 {1},状态为 {2}", "arguments": [ "msg.getTimeout()", "result.Content.Progress", @@ -77,7 +77,7 @@ { "raw": "snapshot task status is finished %s", "en_US": "snapshot task status is finished 
{0}", - "zh_CN": "", + "zh_CN": "快照任务状态已完成 {0}", "arguments": [ "result.Content.TaskStatus" ], @@ -86,14 +86,14 @@ { "raw": "not supported HybridClient", "en_US": "not supported HybridClient", - "zh_CN": "", + "zh_CN": "不支持的 HybridClient", "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/core/AliyunStorageSdkIml.java" }, { "raw": "arg \u0027endpoint\u0027 must be set in %s type", "en_US": "arg \u0027endpoint\u0027 must be set in {0} type", - "zh_CN": "", + "zh_CN": "参数 'endpoint' 必须在 {0} 类型中设置", "arguments": [ "HybridType.AliyunEBS.toString()" ], @@ -102,7 +102,7 @@ { "raw": "not supported datacenter [%s] type here!", "en_US": "not supported datacenter [{0}] type here!", - "zh_CN": "", + "zh_CN": "此处不支持数据中心 [{0}] 类型", "arguments": [ "type.toString()" ], @@ -111,14 +111,14 @@ { "raw": "must indicate zoneId in private aliyun.", "en_US": "must indicate zoneId in private aliyun.", - "zh_CN": "", + "zh_CN": "必须在私有阿里云中指定 zoneId", "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/core/identityzone/AliyunPrivateIdentityZoneBase.java" }, { "raw": "make ocean api signature string failed: %s", "en_US": "make ocean api signature string failed: {0}", - "zh_CN": "", + "zh_CN": "生成 ocean API 签名字符串失败:{0}", "arguments": [ "e.getMessage()" ], @@ -127,7 +127,7 @@ { "raw": "url(ocean endpoint) must be set for aliyun ebs backupstorage", "en_US": "url(ocean endpoint) must be set for aliyun ebs backupstorage", - "zh_CN": "", + "zh_CN": "必须为阿里云 EBS 备份存储设置 URL(ocean 端点)", "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/ebs/storage/backup/AliyunEbsBackupStorageApiInterceptor.java" }, @@ -241,6 +241,16 @@ ], "fileName": "src/main/java/org/zstack/aliyun/ebs/storage/primary/AliyunEbsPrimaryStorageApiInterceptor.java" }, + { + "raw": "the aliyun ebs primary storage[uuid:%s, name:%s] cannot find any available host in attached clusters for instantiating the volume", + "en_US": "the aliyun ebs primary storage[uuid:{0}, name:{1}] cannot find any available host 
in attached clusters for instantiating the volume", + "zh_CN": "阿里云 EBS 主存储[uuid:{0}, name:{1}] 在附加的集群中找不到可用于实例化卷的可用主机", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/aliyun/ebs/storage/primary/AliyunEbsPrimaryStorageBase.java" + }, { "raw": "cannot find snapshot from image: %s, maybe the image has been deleted", "en_US": "cannot find snapshot from image: {0}, maybe the image has been deleted", @@ -488,6 +498,17 @@ ], "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunKvmBackend.java" }, + { + "raw": "failed to ping aliyun nas primary storage[uuid:%s] from host[uuid:%s],because %s. disconnect this host-ps connection", + "en_US": "failed to ping aliyun nas primary storage[uuid:{0}] from host[uuid:{1}],because {2}. disconnect this host-ps connection", + "zh_CN": "从主机[uuid:{1}] ping 阿里云 NAS 主存储[uuid:{0}] 失败,因为 {2}。断开此主机-存储连接", + "arguments": [ + "self.getUuid()", + "hostUuid", + "rsp.error" + ], + "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunKvmBackend.java" + }, { "raw": "operation error, because:%s", "en_US": "operation error, because:{0}", @@ -603,6 +624,21 @@ ], "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunKvmBackend.java" }, + { + "raw": "unable to attach a primary storage to cluster. Kvm host[uuid:%s, name:%s] in cluster has qemu-img with version[%s]; but the primary storage has attached to a cluster that has kvm host[uuid:%s], which has qemu-img with version[%s]. qemu-img version greater than %s is incompatible with versions less than %s, this will causes volume snapshot operation to fail. Please avoid attaching a primary storage to clusters that have different Linux distributions, in order to prevent qemu-img version mismatch", + "en_US": "unable to attach a primary storage to cluster. 
Kvm host[uuid:{0}, name:{1}] in cluster has qemu-img with version[{2}]; but the primary storage has attached to a cluster that has kvm host[uuid:{3}], which has qemu-img with version[{4}]. qemu-img version greater than {5} is incompatible with versions less than {6}, this will causes volume snapshot operation to fail. Please avoid attaching a primary storage to clusters that have different Linux distributions, in order to prevent qemu-img version mismatch", + "zh_CN": "", + "arguments": [ + "context.getInventory().getUuid()", + "context.getInventory().getName()", + "mine", + "e.getKey()", + "version", + "QCOW3_QEMU_IMG_VERSION", + "QCOW3_QEMU_IMG_VERSION" + ], + "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunKvmFactory.java" + }, { "raw": "no available host could download imagecache!", "en_US": "no available host could download imagecache!", @@ -610,6 +646,26 @@ "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunKvmFactory.java" }, + { + "raw": "the aliyun nas primary storage[uuid:%s, name:%s] cannot find any available host in attached clusters for instantiating the volume", + "en_US": "the aliyun nas primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for instantiating the volume", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunNasPrimaryStorageBase.java" + }, + { + "raw": "the aliyun nas primary storage[uuid:%s, name:%s] cannot find any available host in attached clusters for delete bits on primarystorage", + "en_US": "the aliyun nas primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for delete bits on primarystorage", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunNasPrimaryStorageBase.java" + }, { "raw": "not support", "en_US": 
"not support", @@ -617,6 +673,26 @@ "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunNasPrimaryStorageBase.java" }, + { + "raw": "the AliyunNAS primary storage[uuid:%s, name:%s] has not attached to any clusters, or no hosts in the attached clusters are connected", + "en_US": "the AliyunNAS primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunNasPrimaryStorageBase.java" + }, + { + "raw": "the Aliyun Nas primary storage[uuid:%s, name:%s] has not attached to any clusters, or no hosts in the attached clusters are connected", + "en_US": "the Aliyun Nas primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunNasPrimaryStorageBase.java" + }, { "raw": "failed to check mount path on host: %s", "en_US": "failed to check mount path on host: {0}", @@ -642,6 +718,15 @@ ], "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunNasPrimaryStorageBase.java" }, + { + "raw": "cannot find available host for operation on primary storage[uuid:%s].", + "en_US": "cannot find available host for operation on primary storage[uuid:{0}].", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/aliyun/nas/storage/primary/AliyunNasPrimaryStorageBase.java" + }, { "raw": "host where vm[uuid:%s] locate is not Connected.", "en_US": "host where vm[uuid:{0}] locate is not Connected.", diff --git a/conf/i18n_json/i18n_baremetal.json b/conf/i18n_json/i18n_baremetal.json index e7f0c0be8b3..a36821d1678 100644 --- a/conf/i18n_json/i18n_baremetal.json +++ 
b/conf/i18n_json/i18n_baremetal.json @@ -8,6 +8,13 @@ ], "fileName": "src/main/java/org/zstack/baremetal/chassis/BaremetalChassisApiInterceptor.java" }, + { + "raw": "Failed to reach the bare-metal chassis, please make sure: 1. the IPMI connection is active; 2. the IPMI Address, Port, Username and Password are correct; 3. IPMI Over LAN is enabled in BIOS.", + "en_US": "Failed to reach the bare-metal chassis, please make sure: 1. the IPMI connection is active; 2. the IPMI Address, Port, Username and Password are correct; 3. IPMI Over LAN is enabled in BIOS.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/baremetal/chassis/BaremetalChassisApiInterceptor.java" + }, { "raw": "Baremetal Chassis of IPMI address %s and IPMI port %d has already been created.", "en_US": "Baremetal Chassis of IPMI address {0} and IPMI port {1} has already been created.", @@ -82,6 +89,24 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal/chassis/BaremetalChassisManagerImpl.java" }, + { + "raw": "Hijacked detected. Your license[%s] permits %s baremetal chassis, but we detect there are %s in the database. You can either delete additional chassis or apply a new license.", + "en_US": "Hijacked detected. Your license[{0}] permits {1} baremetal chassis, but we detect there are {2} in the database. You can either delete additional chassis or apply a new license.", + "zh_CN": "", + "arguments": [ + "licMgr.getLicenseType()", + "allowedChassisNum", + "currChassisNum" + ], + "fileName": "src/main/java/org/zstack/baremetal/chassis/BaremetalChassisManagerImpl.java" + }, + { + "raw": "Insufficient baremetal chassis number licensed. You can either delete additional chassis or apply a new license.", + "en_US": "Insufficient baremetal chassis number licensed. 
You can either delete additional chassis or apply a new license.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/baremetal/chassis/BaremetalChassisManagerImpl.java" + }, { "raw": "failed to delete baremetal chassis %s", "en_US": "failed to delete baremetal chassis {0}", @@ -503,6 +528,19 @@ ], "fileName": "src/main/java/org/zstack/baremetal/pxeserver/BaremetalPxeServerApiInterceptor.java" }, + { + "raw": "baremetal pxeserver[uuid:%s] is not compatible with baremetal instances in cluster[uuid:%s], existing nic ip %s is out of pxeserver dhcp range %s ~ %s.", + "en_US": "baremetal pxeserver[uuid:{0}] is not compatible with baremetal instances in cluster[uuid:{1}], existing nic ip {2} is out of pxeserver dhcp range {3} ~ {4}.", + "zh_CN": "", + "arguments": [ + "msg.getPxeServerUuid()", + "msg.getClusterUuid()", + "ip", + "begin", + "end" + ], + "fileName": "src/main/java/org/zstack/baremetal/pxeserver/BaremetalPxeServerApiInterceptor.java" + }, { "raw": "baremetal pxeserver[uuid: %s] not attached to cluster[uuid: %s]", "en_US": "baremetal pxeserver[uuid: {0}] not attached to cluster[uuid: {1}]", @@ -614,6 +652,16 @@ ], "fileName": "src/main/java/org/zstack/baremetal/pxeserver/BaremetalPxeServerBase.java" }, + { + "raw": "the uuid of baremtal pxeserver agent changed[expected:%s, actual:%s], it\u0027s most likely the agent was manually restarted. Issue a reconnect to sync the status", + "en_US": "the uuid of baremtal pxeserver agent changed[expected:{0}, actual:{1}], it\u0027s most likely the agent was manually restarted. 
Issue a reconnect to sync the status", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "ret.uuid" + ], + "fileName": "src/main/java/org/zstack/baremetal/pxeserver/BaremetalPxeServerBase.java" + }, { "raw": "operation error, because:%s", "en_US": "operation error, because:{0}", diff --git a/conf/i18n_json/i18n_baremetal2.json b/conf/i18n_json/i18n_baremetal2.json index 4d3abaadfe7..f677804a250 100644 --- a/conf/i18n_json/i18n_baremetal2.json +++ b/conf/i18n_json/i18n_baremetal2.json @@ -17,6 +17,15 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisApiInterceptor.java" }, + { + "raw": "cannot find the cluster of baremetal2 chassis[uuid:%s], maybe it doesn\u0027t exist", + "en_US": "cannot find the cluster of baremetal2 chassis[uuid:{0}], maybe it doesn\u0027t exist", + "zh_CN": "", + "arguments": [ + "chassisUuid" + ], + "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisApiInterceptor.java" + }, { "raw": "there is no baremetal2 gateway found in cluster[uuid:%s]", "en_US": "there is no baremetal2 gateway found in cluster[uuid:{0}]", @@ -44,6 +53,15 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisApiInterceptor.java" }, + { + "raw": "baremetal2 provision network[uuid:%s] is not usable, make sure it\u0027s Enabled", + "en_US": "baremetal2 provision network[uuid:{0}] is not usable, make sure it\u0027s Enabled", + "zh_CN": "", + "arguments": [ + "provisionNetworkUuid" + ], + "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisApiInterceptor.java" + }, { "raw": "wrong baremetal2 chassis hardware info format: %s", "en_US": "wrong baremetal2 chassis hardware info format: {0}", @@ -120,6 +138,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisManagerImpl.java" }, + { + "raw": "no available baremetal2 chassis found in baremetal2 clusters[uuids:%s]", + "en_US": "no available baremetal2 chassis found in baremetal2 
clusters[uuids:{0}]", + "zh_CN": "", + "arguments": [ + "msg.getRequiredClusterUuids()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisManagerImpl.java" + }, { "raw": "Cannot find BareMetal2 Chassis[uuid:%s], it may have been deleted", "en_US": "Cannot find BareMetal2 Chassis[uuid:{0}], it may have been deleted", @@ -136,6 +163,24 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisManagerImpl.java" }, + { + "raw": "Hijacked detected. Your license[%s] permits %s elastic-baremetal chassis, but we detect there are %s in the database. You can either delete additional chassis or apply a new license.", + "en_US": "Hijacked detected. Your license[{0}] permits {1} elastic-baremetal chassis, but we detect there are {2} in the database. You can either delete additional chassis or apply a new license.", + "zh_CN": "", + "arguments": [ + "licMgr.getLicenseType()", + "allowedChassisNum", + "currChassisNum" + ], + "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisManagerImpl.java" + }, + { + "raw": "Insufficient elastic-baremetal chassis number licensed. You can either delete additional chassis or apply a new license.", + "en_US": "Insufficient elastic-baremetal chassis number licensed. You can either delete additional chassis or apply a new license.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/baremetal2/chassis/BareMetal2ChassisManagerImpl.java" + }, { "raw": "not supported", "en_US": "not supported", @@ -162,6 +207,13 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/chassis/ipmi/BareMetal2IpmiChassisApiInterceptor.java" }, + { + "raw": "Failed to reach the baremetal2 chassis, please make sure: 1. the IPMI connection is active; 2. the IPMI Address, Port, Username and Password are correct; 3. IPMI Over LAN is enabled in BIOS.", + "en_US": "Failed to reach the baremetal2 chassis, please make sure: 1. the IPMI connection is active; 2. 
the IPMI Address, Port, Username and Password are correct; 3. IPMI Over LAN is enabled in BIOS.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/baremetal2/chassis/ipmi/BareMetal2IpmiChassisApiInterceptor.java" + }, { "raw": "BareMetal2 Chassis of IPMI address %s and IPMI port %d has already been created.", "en_US": "BareMetal2 Chassis of IPMI address {0} and IPMI port {1} has already been created.", @@ -301,6 +353,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/cluster/BareMetal2ClusterFactory.java" }, + { + "raw": "l2 network should not have the same interface name with provision network that\u0027s already attached to the cluster", + "en_US": "l2 network should not have the same interface name with provision network that\u0027s already attached to the cluster", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/baremetal2/cluster/BareMetal2ClusterFactory.java" + }, { "raw": "Can not attach third-party ceph with token into aarch64 cluster.", "en_US": "Can not attach third-party ceph with token into aarch64 cluster.", @@ -315,6 +374,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/cluster/LocalStorageAttachBm2ClusterMetric.java" }, + { + "raw": "failed to delete convert volume to chassis local disk configurations in gateway[uuid:%s] for baremetal2 instance[uuid:%s]", + "en_US": "failed to delete convert volume to chassis local disk configurations in gateway[uuid:{0}] for baremetal2 instance[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "msg.getInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "baremetal2 instance[uuid:%s] is not connected, cannot detach provision nic from bonding", "en_US": "baremetal2 instance[uuid:{0}] is not connected, cannot detach provision nic from bonding", @@ -324,6 +393,17 @@ ], "fileName": 
"src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" }, + { + "raw": "failed to detach provision nic to bonding on baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to detach provision nic to bonding on baremetal2 instance[uuid:{0}] through gateway[uuid:{1}], because {2}", + "zh_CN": "", + "arguments": [ + "msg.getInstanceUuid()", + "self.getUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "baremetal2 instance[uuid:%s] not connected, cannot attach provision nic to bond", "en_US": "baremetal2 instance[uuid:{0}] not connected, cannot attach provision nic to bond", @@ -333,6 +413,17 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" }, + { + "raw": "failed to attach provision nic to bonding on baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to attach provision nic to bonding on baremetal2 instance[uuid:{0}] through gateway[uuid:{1}], because {2}", + "zh_CN": "", + "arguments": [ + "msg.getInstanceUuid()", + "self.getUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "chassis:%s disk does not have wwn info, please inspect chassis and try again", "en_US": "chassis:{0} disk does not have wwn info, please inspect chassis and try again", @@ -398,6 +489,73 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" }, + { + "raw": "failed to create provision configurations for baremetal2 instance[uuid:%s] in gateway[uuid:%s], because %s", + "en_US": "failed to create provision configurations for baremetal2 instance[uuid:{0}] in gateway[uuid:{1}], because {2}", + "zh_CN": "", + "arguments": [ + "msg.getInstanceUuid()", + "self.getUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, + { + "raw": 
"failed to delete provision configurations for baremetal2 instance[uuid:%s] in gateway[uuid:%s], because %s", + "en_US": "failed to delete provision configurations for baremetal2 instance[uuid:{0}] in gateway[uuid:{1}], because {2}", + "zh_CN": "", + "arguments": [ + "msg.getInstanceUuid()", + "self.getUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, + { + "raw": "failed to create console proxy for baremetal2 instance[uuid:%s] in gateway[uuid:%s], because %s", + "en_US": "failed to create console proxy for baremetal2 instance[uuid:{0}] in gateway[uuid:{1}], because {2}", + "zh_CN": "", + "arguments": [ + "msg.getInstanceUuid()", + "self.getUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, + { + "raw": "failed to change default network from l3[uuid:%s] to l3[uuid:%s] for baremetal2 instance[uuid:%s], because %s", + "en_US": "failed to change default network from l3[uuid:{0}] to l3[uuid:{1}] for baremetal2 instance[uuid:{2}], because {3}", + "zh_CN": "", + "arguments": [ + "msg.getOldDefaultL3Uuid()", + "msg.getNewDefaultL3Uuid()", + "msg.getInstanceUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, + { + "raw": "failed to ping baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to ping baremetal2 instance[uuid:{0}] through gateway[uuid:{1}], because {2}", + "zh_CN": "", + "arguments": [ + "msg.getInstanceUuid()", + "msg.getGatewayUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, + { + "raw": "failed to change the password of baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to change the password of baremetal2 instance[uuid:{0}] through gateway[uuid:{1}], because {2}", + "zh_CN": "", + "arguments": [ + 
"msg.getInstanceUuid()", + "msg.getGatewayUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "failed to power on baremetal2 chassis[uuid:%s] using ipmitool", "en_US": "failed to power on baremetal2 chassis[uuid:{0}] using ipmitool", @@ -416,6 +574,16 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" }, + { + "raw": "failed to power off baremetal2 instance[uuid:%s] by bm agent, because %s", + "en_US": "failed to power off baremetal2 instance[uuid:{0}] by bm agent, because {1}", + "zh_CN": "", + "arguments": [ + "bm.getUuid()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "baremetal2 chassis[uuid:%s] is still not POWER_OFF %d seconds later", "en_US": "baremetal2 chassis[uuid:{0}] is still not POWER_OFF {1} seconds later", @@ -452,6 +620,18 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" }, + { + "raw": "failed to attach nic[uuid:%s] to baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to attach nic[uuid:{0}] to baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3}", + "zh_CN": "", + "arguments": [ + "nicUuid", + "bmUuid", + "gatewayUuid", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "baremetal2 instance[uuid:%s] is not connected, cannot detach nic from it", "en_US": "baremetal2 instance[uuid:{0}] is not connected, cannot detach nic from it", @@ -461,6 +641,42 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" }, + { + "raw": "failed to detach nic[uuid:%s] from baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to detach nic[uuid:{0}] from baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3}", + "zh_CN": "", + "arguments": [ + 
"nicUuid", + "bmUuid", + "gatewayUuid", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, + { + "raw": "failed to prepare volume[uuid:%s] for baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to prepare volume[uuid:{0}] for baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3}", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "bmUuid", + "gatewayUuid", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, + { + "raw": "failed to attach volume[uuid:%s] to baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to attach volume[uuid:{0}] to baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3}", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "bmUuid", + "gatewayUuid", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "baremetal2 instance[uuid:%s] is not connected, cannot attach volume to it", "en_US": "baremetal2 instance[uuid:{0}] is not connected, cannot attach volume to it", @@ -470,6 +686,18 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" }, + { + "raw": "failed to get volume[uuid:%s] lunid for baremetal2 instance[uuid:%s] in gateway[uuid:%s], because %s", + "en_US": "failed to get volume[uuid:{0}] lunid for baremetal2 instance[uuid:{1}] in gateway[uuid:{2}], because {3}", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "bmUuid", + "gatewayUuid", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "failed to get gateway ips of the access path[iscsiPath: %s] for block volume %s, because %s", "en_US": "failed to get gateway ips of the access path[iscsiPath: {0}] for block volume {1}, because {2}", @@ -481,6 +709,30 @@ ], "fileName": 
"src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" }, + { + "raw": "failed to detach volume[uuid:%s] from baremetal2 instance[uuid:%s] through gateway[uuid:%s], because %s", + "en_US": "failed to detach volume[uuid:{0}] from baremetal2 instance[uuid:{1}] through gateway[uuid:{2}], because {3}", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "bmUuid", + "gatewayUuid", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, + { + "raw": "failed to destroy volume[uuid:%s] for baremetal2 instance[uuid:%s] in gateway[uuid:%s], because %s", + "en_US": "failed to destroy volume[uuid:{0}] for baremetal2 instance[uuid:{1}] in gateway[uuid:{2}], because {3}", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "bmUuid", + "gatewayUuid", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2Gateway.java" + }, { "raw": "all ceph mons of primary storage[uuid:%s] are not in Connected state", "en_US": "all ceph mons of primary storage[uuid:{0}] are not in Connected state", @@ -630,6 +882,15 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2GatewayApiInterceptor.java" }, + { + "raw": "cannot change the cluster of baremetal2 gateway[uuid:%s] when there are running instances depending on it", + "en_US": "cannot change the cluster of baremetal2 gateway[uuid:{0}] when there are running instances depending on it", + "zh_CN": "", + "arguments": [ + "msg.getGatewayUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2GatewayApiInterceptor.java" + }, { "raw": "baremetal2 instance[uuid:%s] doesn\u0027t exist, cannot generate its console url", "en_US": "baremetal2 instance[uuid:{0}] doesn\u0027t exist, cannot generate its console url", @@ -637,6 +898,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2GatewayConsoleHypervisorBackend.java" }, + { + "raw": "baremetal2 gateway[uuid:%s] is not 
Connected, cannot generate console url for instance[uuid:%s]", + "en_US": "baremetal2 gateway[uuid:{0}] is not Connected, cannot generate console url for instance[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "bm.getGatewayUuid()", + "bm.getUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/gateway/BareMetal2GatewayConsoleHypervisorBackend.java" + }, { "raw": "cluster[uuid:%s] hypervisorType is not %s", "en_US": "cluster[uuid:{0}] hypervisorType is not {1}", @@ -782,6 +1053,24 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceAllocateClusterFlow.java" }, + { + "raw": "only baremetal2 clusters[uuid:%s] meet the needs for chassis and gateway, but they have no provision network attached", + "en_US": "only baremetal2 clusters[uuid:{0}] meet the needs for chassis and gateway, but they have no provision network attached", + "zh_CN": "", + "arguments": [ + "clusterUuids" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceAllocateClusterFlow.java" + }, + { + "raw": "name[%s] is invalid, the name requirement: 1~128 characters, support uppercase and lowercase letters, numbers, underscores, and hyphens; It can only start with uppercase and lowercase letters; It does not start or end with a space ", + "en_US": "name[{0}] is invalid, the name requirement: 1~128 characters, support uppercase and lowercase letters, numbers, underscores, and hyphens; It can only start with uppercase and lowercase letters; It does not start or end with a space ", + "zh_CN": "", + "arguments": [ + "name" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "only support vpc network support attach eip on baremetal2 instance", "en_US": "only support vpc network support attach eip on baremetal2 instance", @@ -828,6 +1117,15 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "baremetal2 
instance[uuid:%s] is running but its agent is not Connected", + "en_US": "baremetal2 instance[uuid:{0}] is running but its agent is not Connected", + "zh_CN": "", + "arguments": [ + "bm.getUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "make sure all baremetal2 gateways on provision network[uuid:%s] are Connected", "en_US": "make sure all baremetal2 gateways on provision network[uuid:{0}] are Connected", @@ -837,6 +1135,24 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "baremetal2 instance[uuid:%s] is not stopped can not change its chassis offering", + "en_US": "baremetal2 instance[uuid:{0}] is not stopped can not change its chassis offering", + "zh_CN": "", + "arguments": [ + "msg.getInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "baremetal2 instance[uuid:%s] has not been allocated a chassis, start the instance and try again", + "en_US": "baremetal2 instance[uuid:{0}] has not been allocated a chassis, start the instance and try again", + "zh_CN": "", + "arguments": [ + "msg.getVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "only l3 network with ip version %d is supported by baremetal2 instance", "en_US": "only l3 network with ip version {0} is supported by baremetal2 instance", @@ -880,6 +1196,17 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "baremetal2 instance[uuid:%s] running on chassis[uuid:%s], which doesn\u0027t have non-provisioning nic with mac address %s", + "en_US": "baremetal2 instance[uuid:{0}] running on chassis[uuid:{1}], which doesn\u0027t have non-provisioning nic with mac address {2}", + "zh_CN": "", + "arguments": [ + "bm.getUuid()", + 
"bm.getChassisUuid()", + "msg.getCustomMac()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "mac address %s has already been used, try another one", "en_US": "mac address {0} has already been used, try another one", @@ -912,6 +1239,35 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "cluster[uuid:%s] is not an Enabled baremetal2 cluster, cannot start instance[uuid:%s] in it", + "en_US": "cluster[uuid:{0}] is not an Enabled baremetal2 cluster, cannot start instance[uuid:{1}] in it", + "zh_CN": "", + "arguments": [ + "msg.getClusterUuid()", + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "baremetal2 gateway[uuid:%s] does not exist or is not Enabled or Connected", + "en_US": "baremetal2 gateway[uuid:{0}] does not exist or is not Enabled or Connected", + "zh_CN": "", + "arguments": [ + "msg.getGatewayUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "baremetal2 gateway[uuid:%s] is not in cluster [uuid:%s]", + "en_US": "baremetal2 gateway[uuid:{0}] is not in cluster [uuid:{1}]", + "zh_CN": "", + "arguments": [ + "msg.getGatewayUuid()", + "msg.getClusterUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "please specify chassis uuid or chassis offering uuid to start baremetal2 instance[uuid:%s]", "en_US": "please specify chassis uuid or chassis offering uuid to start baremetal2 instance[uuid:{0}]", @@ -962,6 +1318,16 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "baremetal2 chassis[uuid:%s] is not belonging to chassis offering[uuid:%s]", + "en_US": "baremetal2 chassis[uuid:{0}] is not belonging to chassis 
offering[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "msg.getChassisUuid()", + "bm.getChassisOfferingUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "baremetal2 chassis[uuid:%s] is not Enabled", "en_US": "baremetal2 chassis[uuid:{0}] is not Enabled", @@ -980,6 +1346,24 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "zone[uuid:%s] is specified but it\u0027s not Enabled, can not create baremetal2 instance from it", + "en_US": "zone[uuid:{0}] is specified but it\u0027s not Enabled, can not create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getZoneUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "cluster[uuid:%s] is specified but it\u0027s not an Enabled baremetal2 cluster, can not create baremetal2 instance from it", + "en_US": "cluster[uuid:{0}] is specified but it\u0027s not an Enabled baremetal2 cluster, can not create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getClusterUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "neither chassisUuid nor chassisOfferingUuid is set when create baremetal2 instance", "en_US": "neither chassisUuid nor chassisOfferingUuid is set when create baremetal2 instance", @@ -994,6 +1378,61 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "baremetal2 chassis[uuid:%s] is not Enabled, can\u0027t create baremetal2 instance from it", + "en_US": "baremetal2 chassis[uuid:{0}] is not Enabled, can\u0027t create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getChassisUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + 
"raw": "baremetal2 chassis[uuid:%s] is not Available, can\u0027t create baremetal2 instance from it", + "en_US": "baremetal2 chassis[uuid:{0}] is not Available, can\u0027t create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getChassisUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "baremetal2 chassis offering[uuid:%s] is not Enabled, can\u0027t create baremetal2 instance from it", + "en_US": "baremetal2 chassis offering[uuid:{0}] is not Enabled, can\u0027t create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getChassisOfferingUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "baremetal2 gateway[uuid:%s] is not Enabled, can\u0027t create baremetal2 instance from it", + "en_US": "baremetal2 gateway[uuid:{0}] is not Enabled, can\u0027t create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getGatewayUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "baremetal2 gateway[uuid:%s] is not Connected, can\u0027t create baremetal2 instance from it", + "en_US": "baremetal2 gateway[uuid:{0}] is not Connected, can\u0027t create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getGatewayUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "baremetal2 gateway[uuid:%s] is not in the same cluster with chassis[uuid:%s]", + "en_US": "baremetal2 gateway[uuid:{0}] is not in the same cluster with chassis[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "msg.getGatewayUuid()", + "msg.getChassisUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "image cannot be empty unless chassis is in direct mode", "en_US": "image 
cannot be empty unless chassis is in direct mode", @@ -1027,6 +1466,62 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "image[uuid:%s] is not Enabled, can\u0027t create baremetal2 instance from it", + "en_US": "image[uuid:{0}] is not Enabled, can\u0027t create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getImageUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "image[uuid:%s] is not Ready, can\u0027t create baremetal2 instance from it", + "en_US": "image[uuid:{0}] is not Ready, can\u0027t create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "msg.getImageUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "image[uuid:%s] is of mediaType: %s, only RootVolumeTemplate can be used to create baremetal2 instance", + "en_US": "image[uuid:{0}] is of mediaType: {1}, only RootVolumeTemplate can be used to create baremetal2 instance", + "zh_CN": "", + "arguments": [ + "msg.getImageUuid()", + "image.getMediaType()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "image[uuid:%s] is of format: %s, only %s can be used to create baremetal2 instance", + "en_US": "image[uuid:{0}] is of format: {1}, only {2} can be used to create baremetal2 instance", + "zh_CN": "", + "arguments": [ + "msg.getImageUuid()", "image.getFormat()", + "BareMetal2InstanceConstant.IMAGE_FORMAT_FOR_BM" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "image[uuid:%s] is not baremetal2 image, can\u0027t create baremetal2 instance from it", + "en_US": "image[uuid:{0}] is not baremetal2 image, can\u0027t create baremetal2 instance from it", + "zh_CN": "", + "arguments": [ + "image.getUuid()" + ], + "fileName": 
"src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "only image with boot mode %s is supported to create baremetal2 instance", + "en_US": "only image with boot mode {0} is supported to create baremetal2 instance", + "zh_CN": "", + "arguments": [ + "BareMetal2GlobalProperty.BAREMETAL2_SUPPORTED_BOOT_MODE" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "different boot mode between the image and chassis/offering", "en_US": "different boot mode between the image and chassis/offering", @@ -1034,6 +1529,35 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" }, + { + "raw": "the architecture of baremetal2 cluster[arch:%s] and image[arch:%s] don\u0027t match", + "en_US": "the architecture of baremetal2 cluster[arch:{0}] and image[arch:{1}] don\u0027t match", + "zh_CN": "", + "arguments": [ + "clusterArchitecture", + "image.getArchitecture()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "not all disk offerings[uuids:%s] are Enabled, can not create baremetal2 instance from them", + "en_US": "not all disk offerings[uuids:{0}] are Enabled, can not create baremetal2 instance from them", + "zh_CN": "", + "arguments": [ + "msg.getDataDiskOfferingUuids()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, + { + "raw": "the primary storage[%s] of the root volume and the primary storage[%s] of the data volume are not in the same cluster", + "en_US": "the primary storage[{0}] of the root volume and the primary storage[{1}] of the data volume are not in the same cluster", + "zh_CN": "", + "arguments": [ + "msg.getPrimaryStorageUuidForRootVolume()", + "msg.getPrimaryStorageUuidForDataVolume()" + ], + "fileName": 
"src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceApiInterceptor.java" + }, { "raw": "cannot decide which zone the baremetal2 instance should be created in", "en_US": "cannot decide which zone the baremetal2 instance should be created in", @@ -1048,6 +1572,29 @@ "arguments": [], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceBase.java" }, + { + "raw": "cannot find the image[uuid:%s] in any connected backup storage attached to the zone[uuid:%s]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: %s, uuid:%s] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "en_US": "cannot find the image[uuid:{0}] in any connected backup storage attached to the zone[uuid:{1}]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {2}, uuid:{3}] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "zh_CN": "", + "arguments": [ + "imageUuid", + "spec.getVmInventory().getZoneUuid()", + "spec.getVmInventory().getName()", + "spec.getVmInventory().getUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceImageSelectBackupStorageFlow.java" + }, + { + "raw": "cannot find the image[uuid:%s] in any connected backup storage. check below:\\n1. if the backup storage is attached to the zone where the VM[name: %s, uuid:%s] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "en_US": "cannot find the image[uuid:{0}] in any connected backup storage. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {1}, uuid:{2}] is in\\n2. 
if the backup storage is in connected status, if not, try reconnecting it", + "zh_CN": "", + "arguments": [ + "imageUuid", + "spec.getVmInventory().getName()", + "spec.getVmInventory().getUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceImageSelectBackupStorageFlow.java" + }, { "raw": "no backup storage attached to the zone[uuid:%s] contains the ISO[uuid:%s]", "en_US": "no backup storage attached to the zone[uuid:{0}] contains the ISO[uuid:{1}]", @@ -1085,6 +1632,28 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/instance/BareMetal2InstanceManagerImpl.java" }, + { + "raw": "there already exists a baremetal2 provision network with dhcpInterface \u003d %s, dhcpRangeStartIp \u003d %s, dhcpRangeEndIp \u003d %s, dhcpRangeNetmask \u003d %s, dhcpRangeGateway \u003d %s", + "en_US": "there already exists a baremetal2 provision network with dhcpInterface \u003d {0}, dhcpRangeStartIp \u003d {1}, dhcpRangeEndIp \u003d {2}, dhcpRangeNetmask \u003d {3}, dhcpRangeGateway \u003d {4}", + "zh_CN": "", + "arguments": [ + "dhcpInterface", + "dhcpRangeStartIp", + "dhcpRangeEndIp", + "dhcpRangeNetmask", + "dhcpRangeGateway" + ], + "fileName": "src/main/java/org/zstack/baremetal2/provisionnetwork/BareMetal2ProvisionNetworkApiInterceptor.java" + }, + { + "raw": "cannot update baremetal2 provision network[uuid:%s] dhcp configuration when there are instances depending on it", + "en_US": "cannot update baremetal2 provision network[uuid:{0}] dhcp configuration when there are instances depending on it", + "zh_CN": "", + "arguments": [ + "msg.getNetworkUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/provisionnetwork/BareMetal2ProvisionNetworkApiInterceptor.java" + }, { "raw": "baremetal2 provision network dhcp range netmask %s is invalid", "en_US": "baremetal2 provision network dhcp range netmask {0} is invalid", @@ -1104,6 +1673,15 @@ ], "fileName": 
"src/main/java/org/zstack/baremetal2/provisionnetwork/BareMetal2ProvisionNetworkApiInterceptor.java" }, + { + "raw": "cannot delete baremetal2 provision network[uuid:%s] when there are instances depending on it", + "en_US": "cannot delete baremetal2 provision network[uuid:{0}] when there are instances depending on it", + "zh_CN": "", + "arguments": [ + "msg.getNetworkUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/provisionnetwork/BareMetal2ProvisionNetworkApiInterceptor.java" + }, { "raw": "cannot attach baremetal2 provision network[uuid:%s] to non-baremetal2 cluster[uuid:%s]", "en_US": "cannot attach baremetal2 provision network[uuid:{0}] to non-baremetal2 cluster[uuid:{1}]", @@ -1144,6 +1722,32 @@ ], "fileName": "src/main/java/org/zstack/baremetal2/provisionnetwork/BareMetal2ProvisionNetworkApiInterceptor.java" }, + { + "raw": "cannot attach baremetal2 provision network[uuid:%s] to cluster[uuid:%s], because we need to make sure that every gateway attached to the clusters that have the same provision network attached", + "en_US": "cannot attach baremetal2 provision network[uuid:{0}] to cluster[uuid:{1}], because we need to make sure that every gateway attached to the clusters that have the same provision network attached", + "zh_CN": "", + "arguments": [ + "msg.getNetworkUuid()", + "msg.getClusterUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/provisionnetwork/BareMetal2ProvisionNetworkApiInterceptor.java" + }, + { + "raw": "provision network should not have the same interface name with l2 networks that are already attached to the cluster", + "en_US": "provision network should not have the same interface name with l2 networks that are already attached to the cluster", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/baremetal2/provisionnetwork/BareMetal2ProvisionNetworkApiInterceptor.java" + }, + { + "raw": "cannot detach baremetal2 provision network[uuid:%s] when there are running instances depending on 
it", + "en_US": "cannot detach baremetal2 provision network[uuid:{0}] when there are running instances depending on it", + "zh_CN": "", + "arguments": [ + "msg.getNetworkUuid()" + ], + "fileName": "src/main/java/org/zstack/baremetal2/provisionnetwork/BareMetal2ProvisionNetworkApiInterceptor.java" + }, { "raw": "networkUuids is empty", "en_US": "networkUuids is empty", diff --git a/conf/i18n_json/i18n_block-primary-storage.json b/conf/i18n_json/i18n_block-primary-storage.json index edecc242fda..3b9b2070386 100644 --- a/conf/i18n_json/i18n_block-primary-storage.json +++ b/conf/i18n_json/i18n_block-primary-storage.json @@ -15,6 +15,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/block/BlockPrimaryStorageBase.java" }, + { + "raw": "the block primary storage[uuid:%s, name:%s] can not find any available host in attached clusters for instantiating the volume", + "en_US": "the block primary storage[uuid:{0}, name:{1}] can not find any available host in attached clusters for instantiating the volume", + "zh_CN": "块主存储[uuid:{0}, name:{1}] 在附加的集群中找不到可用于实例化卷的可用主机", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/block/BlockPrimaryStorageBase.java" + }, { "raw": "fail to find a host to map for volume %s", "en_US": "fail to find a host to map for volume {0}", @@ -150,6 +160,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/primary/block/BlockPrimaryStorageFactory.java" }, + { + "raw": "not support take volumes snapshots on multiple ps when including ceph", + "en_US": "not support take volumes snapshots on multiple ps when including ceph", + "zh_CN": "当包含 Ceph 时,不支持在多个主存储上同时对卷进行快照", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/primary/block/BlockPrimaryStorageFactory.java" + }, { "raw": "fail to map lun to host before attach volume to vm", "en_US": "fail to map lun to host before attach volume to vm", diff --git a/conf/i18n_json/i18n_cbd.json 
b/conf/i18n_json/i18n_cbd.json index c22e762bdeb..c3f6dcd636d 100644 --- a/conf/i18n_json/i18n_cbd.json +++ b/conf/i18n_json/i18n_cbd.json @@ -1,4 +1,14 @@ [ + { + "raw": "invalid mdsUrl[%s], the sshUsername:sshPassword part is invalid. A valid mdsUrl is in format of %s", + "en_US": "invalid mdsUrl[{0}], the sshUsername:sshPassword part is invalid. A valid mdsUrl is in format of {1}", + "zh_CN": "", + "arguments": [ + "url", + "MDS_URL_FORMAT" + ], + "fileName": "src/main/java/org/zstack/cbd/MdsUri.java" + }, { "raw": "invalid mdsUrl[%s]. SSH username and password must be separated by \u0027:\u0027 and cannot be empty. A valid monUrl format is %s", "en_US": "invalid mdsUrl[{0}]. SSH username and password must be separated by \u0027:\u0027 and cannot be empty. A valid monUrl format is {1}", @@ -9,6 +19,26 @@ ], "fileName": "src/main/java/org/zstack/cbd/MdsUri.java" }, + { + "raw": "invalid mdsUrl[%s], hostname cannot be null. A valid mdsUrl is in format of %s", + "en_US": "invalid mdsUrl[{0}], hostname cannot be null. A valid mdsUrl is in format of {1}", + "zh_CN": "", + "arguments": [ + "url", + "MDS_URL_FORMAT" + ], + "fileName": "src/main/java/org/zstack/cbd/MdsUri.java" + }, + { + "raw": "invalid mdsUrl[%s], the ssh port is greater than 65535 or smaller than 1. A valid mdsUrl is in format of %s", + "en_US": "invalid mdsUrl[{0}], the ssh port is greater than 65535 or smaller than 1. 
A valid mdsUrl is in format of {1}", + "zh_CN": "", + "arguments": [ + "url", + "MDS_URL_FORMAT" + ], + "fileName": "src/main/java/org/zstack/cbd/MdsUri.java" + }, { "raw": "operation error, because:%s", "en_US": "operation error, because:{0}", diff --git a/conf/i18n_json/i18n_cdp.json b/conf/i18n_json/i18n_cdp.json index b84cb12d431..3690cb8d212 100644 --- a/conf/i18n_json/i18n_cdp.json +++ b/conf/i18n_json/i18n_cdp.json @@ -436,6 +436,22 @@ ], "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageManagerImpl.java" }, + { + "raw": "The operation has volume[uuid: %s] that will take chain type snapshot. Therefore, you could not do this operation when a CDP task is running on the VM instance.", + "en_US": "The operation has volume[uuid: {0}] that will take chain type snapshot. Therefore, you could not do this operation when a CDP task is running on the VM instance.", + "zh_CN": "", + "arguments": [ + "msg.getVolume().getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/cdp/CdpBackupStorageManagerImpl.java" + }, + { + "raw": "Could not attach volume.The VM instance is running a CDP task. After the volume is attached, the capacity required for full backup will exceed the CDP task planned size. Please plan the size properly and try again.", + "en_US": "Could not attach volume. The VM instance is running a CDP task. After the volume is attached, the capacity required for full backup will exceed the CDP task planned size. Please plan the size properly and try again.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/cdp/CdpTaskMonitor.java" + }, { "raw": "The VM[%s] for volume[%s] is running CDP, cannot resize now.", "en_US": "The VM[{0}] for volume[{1}] is running CDP, cannot resize now.", @@ -601,6 +617,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/cdp/LicenseChecker.java" }, + { + "raw": "Insufficient CDP VM number licensed. Your license permits %d CDP VM, there are %d CDP VM used. 
You can stop or disable some CDP tasks or apply a new license.", + "en_US": "Insufficient CDP VM number licensed. Your license permits {0} CDP VM, there are {1} CDP VM used. You can stop or disable some CDP tasks or apply a new license.", + "zh_CN": "", + "arguments": [ + "allowedVmNum", + "cdpUsedVmNum" + ], + "fileName": "src/main/java/org/zstack/storage/cdp/LicenseChecker.java" + }, { "raw": "kvmagent restarted", "en_US": "kvmagent restarted", diff --git a/conf/i18n_json/i18n_ceph.json b/conf/i18n_json/i18n_ceph.json index 06d074aceb5..a78fa1ac89b 100644 --- a/conf/i18n_json/i18n_ceph.json +++ b/conf/i18n_json/i18n_ceph.json @@ -159,6 +159,17 @@ ], "fileName": "src/main/java/org/zstack/storage/ceph/backup/CephBackupStorageBase.java" }, + { + "raw": "there is another CEPH backup storage[name:%s, uuid:%s] with the same FSID[%s], you cannot add the same CEPH setup as two different backup storage", + "en_US": "there is another CEPH backup storage[name:{0}, uuid:{1}] with the same FSID[{2}], you cannot add the same CEPH setup as two different backup storage", + "zh_CN": "", + "arguments": [ + "otherCeph.getName()", + "otherCeph.getUuid()", + "fsId" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/backup/CephBackupStorageBase.java" + }, { "raw": "image[uuid: %s] is not on backup storage[uuid:%s, name:%s]", "en_US": "image[uuid: {0}] is not on backup storage[uuid:{1}, name:{2}]", @@ -184,6 +195,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/ceph/backup/CephBackupStorageBase.java" }, + { + "raw": "cannot update status of the ceph backup storage mon[uuid:%s], it has been deleted.This error can be ignored", + "en_US": "cannot update status of the ceph backup storage mon[uuid:{0}], it has been deleted.This error can be ignored", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/backup/CephBackupStorageMonBase.java" + }, { "raw": "Ceph bs[uuid\u003d%s] pool name not found", "en_US": "Ceph 
bs[uuid\u003d{0}] pool name not found", @@ -200,6 +220,20 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephDeleteVolumeChainGC.java" }, + { + "raw": "the backup storage[uuid:%s, name:%s, fsid:%s] is not in the same ceph cluster with the primary storage[uuid:%s, name:%s, fsid:%s]", + "en_US": "the backup storage[uuid:{0}, name:{1}, fsid:{2}] is not in the same ceph cluster with the primary storage[uuid:{3}, name:{4}, fsid:{5}]", + "zh_CN": "", + "arguments": [ + "backupStorage.getUuid()", + "backupStorage.getName()", + "bsFsid", + "self.getUuid()", + "self.getName()", + "getSelf().getFsid()" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" + }, { "raw": "fsid is not same between ps[%s] and bs[%s], create template is forbidden.", "en_US": "fsid is not same between ps[{0}] and bs[{1}], create template is forbidden.", @@ -228,6 +262,15 @@ ], "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" }, + { + "raw": "unable to connect to the ceph primary storage[uuid:%s], failed to connect all ceph monitors.", + "en_US": "unable to connect to the ceph primary storage[uuid:{0}], failed to connect all ceph monitors.", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" + }, { "raw": "ceph primary storage[uuid:%s] may have been deleted.", "en_US": "ceph primary storage[uuid:{0}] may have been deleted.", @@ -244,6 +287,29 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" }, + { + "raw": "there is another CEPH primary storage[name:%s, uuid:%s] with the same FSID[%s], you cannot add the same CEPH setup as two different primary storage", + "en_US": "there is another CEPH primary storage[name:{0}, uuid:{1}] with the same FSID[{2}], you cannot add the same CEPH setup as two different primary storage", + "zh_CN": "", + 
"arguments": [ + "otherCeph.getName()", + "otherCeph.getUuid()", + "fsId" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" + }, + { + "raw": "the ceph primary storage[uuid:%s, name:%s] is down, as one mon[uuid:%s] reports an operation failure[%s]", + "en_US": "the ceph primary storage[uuid:{0}, name:{1}] is down, as one mon[uuid:{2}] reports an operation failure[{3}]", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()", + "mon.getSelf().getUuid()", + "res.error" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" + }, { "raw": "unable to connect mons", "en_US": "unable to connect mons", @@ -258,6 +324,17 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" }, + { + "raw": "the mon[ip:%s] returns a fsid[%s] different from the current fsid[%s] of the cep cluster,are you adding a mon not belonging to current cluster mistakenly?", + "en_US": "the mon[ip:{0}] returns a fsid[{1}] different from the current fsid[{2}] of the ceph cluster, are you adding a mon not belonging to current cluster mistakenly?", + "zh_CN": "", + "arguments": [ + "base.getSelf().getHostname()", + "fsid", + "getSelf().getFsid()" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" + }, { "raw": "%s", "en_US": "{0}", @@ -291,6 +368,16 @@ "arguments": [ ], "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" }, + { + "raw": "cannot find backupstorage to download image [%s] to primarystorage [%s] due to lack of Ready and accessible image", + "en_US": "cannot find backupstorage to download image [{0}] to primarystorage [{1}] due to lack of Ready and accessible image", + "zh_CN": "", + "arguments": [ + "volume.getRootImageUuid()", + "getSelf().getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java" + }, { "raw": "allocated url not 
found", "en_US": "allocated url not found", @@ -316,6 +403,13 @@ ], "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageFactory.java" }, + { + "raw": "not support take volumes snapshots on multiple ps when including ceph", + "en_US": "not support take volumes snapshots on multiple ps when including ceph", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageFactory.java" + }, { "raw": "ceph pool conflict, the ceph pool specified by the instance offering is %s, and the ceph pool specified in the creation parameter is %s", "en_US": "ceph pool conflict, the ceph pool specified by the instance offering is {0}, and the ceph pool specified in the creation parameter is {1}", @@ -385,6 +479,15 @@ ], "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageFactory.java" }, + { + "raw": "cannot update status of the ceph primary storage mon[uuid:%s], it has been deleted.This error can be ignored", + "en_US": "cannot update status of the ceph primary storage mon[uuid:{0}], it has been deleted.This error can be ignored", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageMonBase.java" + }, { "raw": "Ceph ps[uuid\u003d%s] root pool name not found", "en_US": "Ceph ps[uuid\u003d{0}] root pool name not found", @@ -394,6 +497,24 @@ ], "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageMonBase.java" }, + { + "raw": "invalid uri, correct example is ceph://$POOLNAME/$VOLUMEUUID or volume://$VOLUMEUUID or volumeSnapshotReuse://$SNAPSHOTUUID", + "en_US": "invalid uri, correct example is ceph://$POOLNAME/$VOLUMEUUID or volume://$VOLUMEUUID or volumeSnapshotReuse://$SNAPSHOTUUID", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/CephRequiredUrlParser.java" + }, + { + "raw": "required ceph pool[uuid:%s] cannot satisfy conditions [availableSize 
\u003e %s bytes], current available size %s", + "en_US": "required ceph pool[uuid:{0}] cannot satisfy conditions [availableSize \u003e {1} bytes], current available size {2}", + "zh_CN": "", + "arguments": [ + "poolUuid", + "size", + "originAvailableCapacity" + ], + "fileName": "src/main/java/org/zstack/storage/ceph/primary/capacity/CephOsdGroupCapacityHelper.java" + }, { "raw": "cannot find ceph pool [%s] related osdgroup", "en_US": "cannot find ceph pool [{0}] related osdgroup", diff --git a/conf/i18n_json/i18n_cloudformation.json b/conf/i18n_json/i18n_cloudformation.json index e4824b9f6ae..784ba134643 100644 --- a/conf/i18n_json/i18n_cloudformation.json +++ b/conf/i18n_json/i18n_cloudformation.json @@ -267,6 +267,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/cloudformation/template/decoder/AbstractCfnRootDecoder.java" }, + { + "raw": "Condition key: %s only support 1 element in the json object of value, but got %d elements!", + "en_US": "Condition key: {0} only support 1 element in the json object of value, but got {1} elements!", + "zh_CN": "", + "arguments": [ + "key", + "es.size()" + ], + "fileName": "src/main/java/org/zstack/cloudformation/template/decoder/ConditionDecoder.java" + }, { "raw": "Value must be boolean in \u0027Condition\u0027 field", "en_US": "Value must be boolean in \u0027Condition\u0027 field", diff --git a/conf/i18n_json/i18n_compute.json b/conf/i18n_json/i18n_compute.json index c7b0d69fb9a..e2a90a68919 100644 --- a/conf/i18n_json/i18n_compute.json +++ b/conf/i18n_json/i18n_compute.json @@ -8,6 +8,16 @@ ], "fileName": "src/main/java/org/zstack/compute/allocator/AttachedVolumePrimaryStorageAllocatorFlow.java" }, + { + "raw": "the backup storage[uuid:%s, type:%s] requires bound primary storage, however, the primary storage has not been added", + "en_US": "the backup storage[uuid:{0}, type:{1}] requires bound primary storage, however, the primary storage has not been added", + "zh_CN": "", + "arguments": [ + 
"spec.getRequiredBackupStorageUuid()", + "bsType" + ], + "fileName": "src/main/java/org/zstack/compute/allocator/BackupStorageSelectPrimaryStorageAllocatorFlow.java" + }, { "raw": "No host with %s found", "en_US": "No host with {0} found", @@ -164,6 +174,15 @@ ], "fileName": "src/main/java/org/zstack/compute/host/HostApiInterceptor.java" }, + { + "raw": "the password for the physical machine [%s] is empty. please set a password", + "en_US": "the password for the physical machine [{0}] is empty. please set a password", + "zh_CN": "", + "arguments": [ + "msg.getHostName()" + ], + "fileName": "src/main/java/org/zstack/compute/host/HostApiInterceptor.java" + }, { "raw": "path cannot be empty", "en_US": "path cannot be empty", @@ -206,6 +225,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/host/HostApiInterceptor.java" }, + { + "raw": "the mount point must strictly follow the security pattern: \u0027^[a-zA-Z0-9_\\-./]+$\u0027. this requires: \\n1. only alphanumeric characters [a-z, A-Z, 0-9]\\n2. limited special characters: hyphen (-), underscore (_), period (.), and forward slash (/)\\n3. must be a valid absolute path starting with \u0027/\u0027\\n\\nvalid examples:\\n /mnt/data\\n /volumes/drive01\\n /backup-2023.disk\\n\\ninvalid value detected: \u0027%s\u0027", + "en_US": "the mount point must strictly follow the security pattern: \u0027^[a-zA-Z0-9_\\-./]+$\u0027. this requires: \\n1. only alphanumeric characters [a-z, A-Z, 0-9]\\n2. limited special characters: hyphen (-), underscore (_), period (.), and forward slash (/)\\n3. 
must be a valid absolute path starting with \u0027/\u0027\\n\\nvalid examples:\\n /mnt/data\\n /volumes/drive01\\n /backup-2023.disk\\n\\ninvalid value detected: \u0027{0}\u0027", + "zh_CN": "", + "arguments": [ + "mountPoint" + ], + "fileName": "src/main/java/org/zstack/compute/host/HostApiInterceptor.java" + }, { "raw": "mountPoint should not end with \u0027/\u0027 except root directory", "en_US": "mountPoint should not end with \u0027/\u0027 except root directory", @@ -242,6 +270,15 @@ ], "fileName": "src/main/java/org/zstack/compute/host/HostBase.java" }, + { + "raw": "host[%s] does not have ipmi device or ipmi does not have address.After config ipmi address, please reconnect host to refresh host ipmi information", + "en_US": "host[{0}] does not have ipmi device or ipmi does not have address.After config ipmi address, please reconnect host to refresh host ipmi information", + "zh_CN": "", + "arguments": [ + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/host/HostBase.java" + }, { "raw": "Host[%s] is in maintenance state, VM on this host should be migrated", "en_US": "Host[{0}] is in maintenance state, VM on this host should be migrated", @@ -400,6 +437,17 @@ ], "fileName": "src/main/java/org/zstack/compute/host/HostManagerImpl.java" }, + { + "raw": "failed to get disk devices, because [stderr:%s, stdout:%s, exitErrorMessage:%s]", + "en_US": "failed to get disk devices, because [stderr:{0}, stdout:{1}, exitErrorMessage:{2}]", + "zh_CN": "", + "arguments": [ + "ret.getStderr()", + "ret.getStdout()", + "ret.getExitErrorMessage()" + ], + "fileName": "src/main/java/org/zstack/compute/host/HostManagerImpl.java" + }, { "raw": "mountPoint %s is already mount on device %s", "en_US": "mountPoint {0} is already mount on device {1}", @@ -458,6 +506,21 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/AbstractVmInstance.java" }, + { + "raw": "cpu topology is not correct, cpuNum[%s], configured cpuSockets[%s], cpuCores[%s], cpuThreads[%s]; 
Calculated cpuSockets[%s], cpuCores[%s], cpuThreads[%s]", + "en_US": "cpu topology is not correct, cpuNum[{0}], configured cpuSockets[{1}], cpuCores[{2}], cpuThreads[{3}]; Calculated cpuSockets[{4}], cpuCores[{5}], cpuThreads[{6}]", + "zh_CN": "", + "arguments": [ + "cpuNum", + "cpuSockets", + "cpuCores", + "cpuThreads", + "socketNum", + "coreNum", + "threadNum" + ], + "fileName": "src/main/java/org/zstack/compute/vm/CpuTopology.java" + }, { "raw": "the host[uuid:%s] is not connected", "en_US": "the host[uuid:{0}] is not connected", @@ -634,6 +697,18 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmDestroyOnHypervisorFlow.java" }, + { + "raw": "cannot find the iso[uuid:%s] in any connected backup storage attached to the zone[uuid:%s]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: %s, uuid:%s] is running\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "en_US": "cannot find the iso[uuid:{0}] in any connected backup storage attached to the zone[uuid:{1}]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {2}, uuid:{3}] is running\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "zh_CN": "", + "arguments": [ + "iso.getUuid()", + "host.getZoneUuid()", + "spec.getVmInventory().getName()", + "spec.getVmInventory().getUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmDownloadIsoFlow.java" + }, { "raw": "hostname is empty", "en_US": "hostname is empty", @@ -705,6 +780,29 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmHostnameUtils.java" }, + { + "raw": "cannot find the image[uuid:%s] in any connected backup storage attached to the zone[uuid:%s]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: %s, uuid:%s] is in\\n2. 
if the backup storage is in connected status, if not, try reconnecting it", + "en_US": "cannot find the image[uuid:{0}] in any connected backup storage attached to the zone[uuid:{1}]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {2}, uuid:{3}] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "zh_CN": "", + "arguments": [ + "imageUuid", + "spec.getVmInventory().getZoneUuid()", + "spec.getVmInventory().getName()", + "spec.getVmInventory().getUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmImageSelectBackupStorageFlow.java" + }, + { + "raw": "cannot find the image[uuid:%s] in any connected backup storage. check below:\\n1. if the backup storage is attached to the zone where the VM[name: %s, uuid:%s] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "en_US": "cannot find the image[uuid:{0}] in any connected backup storage. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {1}, uuid:{2}] is in\\n2. 
if the backup storage is in connected status, if not, try reconnecting it", + "zh_CN": "", + "arguments": [ + "imageUuid", + "spec.getVmInventory().getName()", + "spec.getVmInventory().getUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmImageSelectBackupStorageFlow.java" + }, { "raw": "no backup storage attached to the zone[uuid:%s] contains the ISO[uuid:%s]", "en_US": "no backup storage attached to the zone[uuid:{0}] contains the ISO[uuid:{1}]", @@ -816,6 +914,16 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java" }, + { + "raw": "unable to change to L3 network[uuid:%s] whose l2Network is not attached to the host[uuid:%s]", + "en_US": "unable to change to L3 network[uuid:{0}] whose l2Network is not attached to the host[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "msg.getDestL3NetworkUuid()", + "hostUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java" + }, { "raw": "the image[name:%s, uuid:%s] is an ISO, rootDiskSize must be set", "en_US": "the image[name:{0}, uuid:{1}] is an ISO, rootDiskSize must be set", @@ -996,6 +1104,16 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java" }, + { + "raw": "could not delete static ip [%s] for vm [uuid:%s] because it does not exist", + "en_US": "could not delete static ip [{0}] for vm [uuid:{1}] because it does not exist", + "zh_CN": "", + "arguments": [ + "msg.getStaticIp()", + "msg.getVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java" + }, { "raw": "dns[%s] should be ipv%s address", "en_US": "dns[{0}] should be ipv{1} address", @@ -1201,6 +1319,17 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java" }, + { + "raw": "unable to attach L3 network[uuid:%s] to VM[uuid:%s] whose l2Network is not attached to the host[uuid:%s]", + "en_US": "unable to attach L3 network[uuid:{0}] to VM[uuid:{1}] whose l2Network is not attached 
to the host[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()", + "msg.getVmInstanceUuid()", + "hostUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java" + }, { "raw": "unable to attach the nic. The vm[uuid: %s] is not Running or Stopped; the current state is %s", "en_US": "unable to attach the nic. The vm[uuid: {0}] is not Running or Stopped; the current state is {1}", @@ -1621,6 +1750,18 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceBase.java" }, + { + "raw": "the ISO[uuid:%s] is on backup storage that is not compatible of the primary storage[uuid:%s] where the VM[name:%s, uuid:%s] is on", + "en_US": "the ISO[uuid:{0}] is on backup storage that is not compatible of the primary storage[uuid:{1}] where the VM[name:{2}, uuid:{3}] is on", + "zh_CN": "", + "arguments": [ + "isoUuid", + "psUuid", + "self.getName()", + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceBase.java" + }, { "raw": "failed to update vm[uuid\u003d%s] on hypervisor.", "en_US": "failed to update vm[uuid\u003d{0}] on hypervisor.", @@ -1696,6 +1837,15 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceBase.java" }, + { + "raw": "Unable to find L3Network[uuid:%s] to start the current vm, it may have been deleted, Operation suggestion: delete this vm, recreate a new vm", + "en_US": "Unable to find L3Network[uuid:{0}] to start the current vm, it may have been deleted, Operation suggestion: delete this vm, recreate a new vm", + "zh_CN": "", + "arguments": [ + "inv.getUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceBase.java" + }, { "raw": "One vm cannot create %s CDROMs, vm can only add %s CDROMs", "en_US": "One vm cannot create {0} CDROMs, vm can only add {1} CDROMs", @@ -1744,6 +1894,38 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceBase.java" }, + { + "raw": "unable to reset volume[uuid:%s] to origin image[uuid:%s], the vm[uuid:%s] 
volume attached to is not in Stopped state, current state is %s", + "en_US": "unable to reset volume[uuid:{0}] to origin image[uuid:{1}], the vm[uuid:{2}] volume attached to is not in Stopped state, current state is {3}", + "zh_CN": "", + "arguments": [ + "self.getRootVolumeUuid()", + "self.getImageUuid()", + "self.getUuid()", + "self.getState()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceBase.java" + }, + { + "raw": "unable to reset volume[uuid:%s] to origin image[uuid:%s], cannot find image cache.", + "en_US": "unable to reset volume[uuid:{0}] to origin image[uuid:{1}], cannot find image cache.", + "zh_CN": "", + "arguments": [ + "rootVolume.getUuid()", + "rootVolume.getRootImageUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceBase.java" + }, + { + "raw": "unable to reset volume[uuid:%s] to origin image[uuid:%s], for image type is ISO", + "en_US": "unable to reset volume[uuid:{0}] to origin image[uuid:{1}], for image type is ISO", + "zh_CN": "", + "arguments": [ + "rootVolume.getUuid()", + "rootVolume.getRootImageUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceBase.java" + }, { "raw": "VmInstanceStartNewCreatedVmExtensionPoint[%s] refuses to create vm[uuid:%s]", "en_US": "VmInstanceStartNewCreatedVmExtensionPoint[{0}] refuses to create vm[uuid:{1}]", @@ -2110,6 +2292,18 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java" }, + { + "raw": "conflict hostname in system tag[%s]; there has been a VM[uuid:%s] having hostname[%s] on L3 network[uuid:%s]", + "en_US": "conflict hostname in system tag[{0}]; there has been a VM[uuid:{1}] having hostname[{2}] on L3 network[uuid:{3}]", + "zh_CN": "", + "arguments": [ + "tag", + "sameTag.getResourceUuid()", + "hostname", + "l3Uuid" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java" + }, { "raw": "invalid boot device[%s] in boot order[%s]", "en_US": "invalid boot device[{0}] in boot 
order[{1}]", @@ -2284,6 +2478,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java" }, + { + "raw": "the resource[uuid:%s] is a ROOT volume, you cannot change its owner, instead,change the owner of the VM the root volume belongs to", + "en_US": "the resource[uuid:{0}] is a ROOT volume, you cannot change its owner, instead,change the owner of the VM the root volume belongs to", + "zh_CN": "", + "arguments": [ + "ref.getResourceUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java" + }, { "raw": "failed to find host of vm[uuid\u003d%s]", "en_US": "failed to find host of vm[uuid\u003d{0}]", @@ -2293,6 +2496,23 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstanceUtils.java" }, + { + "raw": "Failed to instantiate volume. Because vm\u0027s host[uuid: %s] and allocated primary storage[uuid: %s] is not connected.", + "en_US": "Failed to instantiate volume. Because vm\u0027s host[uuid: {0}] and allocated primary storage[uuid: {1}] is not connected.", + "zh_CN": "", + "arguments": [ + "spec.getDestHost().getUuid()", + "pinv.getUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstantiateAttachingVolumeFlow.java" + }, + { + "raw": "the diskAO parameter is incorrect. need to set one of the following properties, and can only be one of them: size, templateUuid, diskOfferingUuid, sourceUuid-sourceType", + "en_US": "the diskAO parameter is incorrect. need to set one of the following properties, and can only be one of them: size, templateUuid, diskOfferingUuid, sourceUuid-sourceType", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/compute/vm/VmInstantiateOtherDiskFlow.java" + }, { "raw": "the disk does not support attachment. disk type is %s", "en_US": "the disk does not support attachment. 
disk type is {0}", @@ -2302,6 +2522,16 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmInstantiateOtherDiskFlow.java" }, + { + "raw": "vm current state[%s], modify virtio requires the vm state[%s]", + "en_US": "vm current state[{0}], modify virtio requires the vm state[{1}]", + "zh_CN": "", + "arguments": [ + "state", + "VmInstanceState.Stopped" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmNicManagerImpl.java" + }, { "raw": "duplicate nic params", "en_US": "duplicate nic params", diff --git a/conf/i18n_json/i18n_console.json b/conf/i18n_json/i18n_console.json index 4567abaf0d6..d761677a6df 100644 --- a/conf/i18n_json/i18n_console.json +++ b/conf/i18n_json/i18n_console.json @@ -1,4 +1,11 @@ [ + { + "raw": "the console agent is not connected; it\u0027s mostly like the management node just starts, please wait for the console agent connected, or you can reconnect it manually if disconnected for a long time.", + "en_US": "the console agent is not connected; it\u0027s mostly like the management node just starts, please wait for the console agent connected, or you can reconnect it manually if disconnected for a long time.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/console/AbstractConsoleProxyBackend.java" + }, { "raw": "cannot find host IP of the vm[uuid:%s], is the vm running???", "en_US": "cannot find host IP of the vm[uuid:{0}], is the vm running???", @@ -46,6 +53,17 @@ ], "fileName": "src/main/java/org/zstack/console/ConsoleProxyBase.java" }, + { + "raw": "console proxy[uuid: %s, status: %s] on agent[ip: %s] is not Connected, fail to delete it", + "en_US": "console proxy[uuid: {0}, status: {1}] on agent[ip: {2}] is not Connected, fail to delete it", + "zh_CN": "", + "arguments": [ + "consoleProxy.getUuid()", + "status", + "consoleProxy.getAgentIp()" + ], + "fileName": "src/main/java/org/zstack/console/DeleteConsoleProxyGcJob.java" + }, { "raw": "Ansible private key not found.", "en_US": "Ansible private key not 
found.", diff --git a/conf/i18n_json/i18n_core.json b/conf/i18n_json/i18n_core.json index 0e6d2e9561e..8fc5fb4b201 100644 --- a/conf/i18n_json/i18n_core.json +++ b/conf/i18n_json/i18n_core.json @@ -299,6 +299,15 @@ ], "fileName": "src/main/java/org/zstack/core/plugin/PluginManager.java" }, + { + "raw": "plugin[%s] name, productKey and vendor cannot be null", + "en_US": "plugin[{0}] name, productKey and vendor cannot be null", + "zh_CN": "插件[{0}]名称、产品密钥和供应商不能为空", + "arguments": [ + "pluginDriver.getClass()" + ], + "fileName": "src/main/java/org/zstack/core/plugin/PluginManager.java" + }, { "raw": "parameter apiId[%s] is not a valid uuid.", "en_US": "parameter apiId[{0}] is not a valid uuid.", diff --git a/conf/i18n_json/i18n_crypto.json b/conf/i18n_json/i18n_crypto.json index 794e9c8067a..eed3b97f1ae 100644 --- a/conf/i18n_json/i18n_crypto.json +++ b/conf/i18n_json/i18n_crypto.json @@ -396,6 +396,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/crypto/datacrypto/integrity/EncryptColumnIntegrityFactory.java" }, + { + "raw": "the shared mount point primary storage[uuid:%s, name:%s] cannot find any available host in attached clusters", + "en_US": "the shared mount point primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/crypto/datacrypto/smp/SMPCryptoBase.java" + }, { "raw": "invalid certificate parameter : %s\u003d%s", "en_US": "invalid certificate parameter : {0}\u003d{1}", @@ -639,6 +649,15 @@ ], "fileName": "src/main/java/org/zstack/crypto/securitymachine/secretresourcepool/SecretResourcePoolApiInterceptor.java" }, + { + "raw": "the identity authentication function is enabled but the corresponding resource pool is not set, please re-enable the function and try again", + "en_US": "the identity authentication function is enabled but the corresponding resource pool is not set, please re-enable the 
function and try again", + "zh_CN": "", + "arguments": [ + "msg.getSecretResourcePoolUuid()" + ], + "fileName": "src/main/java/org/zstack/crypto/securitymachine/secretresourcepool/SecretResourcePoolApiInterceptor.java" + }, { "raw": "cannot delete the resource pool %s when in use", "en_US": "cannot delete the resource pool {0} when in use", diff --git a/conf/i18n_json/i18n_directory.json b/conf/i18n_json/i18n_directory.json index ae8a8a5e3bf..6fc187f7803 100644 --- a/conf/i18n_json/i18n_directory.json +++ b/conf/i18n_json/i18n_directory.json @@ -19,6 +19,13 @@ ], "fileName": "src/main/java/org/zstack/directory/DirectoryApiInterceptor.java" }, + { + "raw": "name contains unsupported characters, name can only contain Chinese characters, English letters, numbers, spaces, and the following characters: ()()【】@._-+ ", + "en_US": "name contains unsupported characters, name can only contain Chinese characters, English letters, numbers, spaces, and the following characters: ()()【】@._-+ ", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/directory/DirectoryApiInterceptor.java" + }, { "raw": "circular dependency detected, directory %s and directory %s will cause circular dependency", "en_US": "circular dependency detected, directory {0} and directory {1} will cause circular dependency", diff --git a/conf/i18n_json/i18n_eip.json b/conf/i18n_json/i18n_eip.json index f95fe5c6227..8fa37306600 100644 --- a/conf/i18n_json/i18n_eip.json +++ b/conf/i18n_json/i18n_eip.json @@ -105,6 +105,19 @@ ], "fileName": "src/main/java/org/zstack/network/service/eip/EipApiInterceptor.java" }, + { + "raw": "the vm[uuid:%s] that the EIP is about to attach is already on the public network[uuid:%s] from which the vip[uuid:%s, name:%s, ip:%s] comes", + "en_US": "the vm[uuid:{0}] that the EIP is about to attach is already on the public network[uuid:{1}] from which the vip[uuid:{2}, name:{3}, ip:{4}] comes", + "zh_CN": "", + "arguments": [ + "vmUuid", + "vip.getL3NetworkUuid()", 
+ "vip.getUuid()", + "vip.getName()", + "vip.getIp()" + ], + "fileName": "src/main/java/org/zstack/network/service/eip/EipApiInterceptor.java" + }, { "raw": "vip[uuid:%s] has been occupied other network service entity[%s]", "en_US": "vip[uuid:{0}] has been occupied other network service entity[{1}]", @@ -179,5 +192,17 @@ "struct.getEip().getUuid()" ], "fileName": "src/main/java/org/zstack/network/service/eip/EipManagerImpl.java" + }, + { + "raw": "unable to attach the L3 network[uuid:%s, name:%s] to the vm[uuid:%s, name:%s], because the L3 network is providing EIP to one of the vm\u0027s nic", + "en_US": "unable to attach the L3 network[uuid:{0}, name:{1}] to the vm[uuid:{2}, name:{3}], because the L3 network is providing EIP to one of the vm\u0027s nic", + "zh_CN": "", + "arguments": [ + "l3.getUuid()", + "l3.getName()", + "vm.getUuid()", + "vm.getName()" + ], + "fileName": "src/main/java/org/zstack/network/service/eip/EipManagerImpl.java" } ] \ No newline at end of file diff --git a/conf/i18n_json/i18n_faulttolerance.json b/conf/i18n_json/i18n_faulttolerance.json index 8b49c3b9d8d..d03b614460f 100644 --- a/conf/i18n_json/i18n_faulttolerance.json +++ b/conf/i18n_json/i18n_faulttolerance.json @@ -1,4 +1,36 @@ [ + { + "raw": "pvm[uuid:%s] and svm[uuid:%s] volume number not matches, do not allowed to start", + "en_US": "pvm[uuid:{0}] and svm[uuid:{1}] volume number not matches, do not allowed to start", + "zh_CN": "", + "arguments": [ + "group.getPrimaryVmInstanceUuid()", + "group.getSecondaryVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceApiInterceptor.java" + }, + { + "raw": "volume with index: %d, of pvm[uuid:%s] and svm[uuid:%s] have different size, do not allowed to start", + "en_US": "volume with index: {0}, of pvm[uuid:{1}] and svm[uuid:{2}] have different size, do not allowed to start", + "zh_CN": "", + "arguments": [ + "i", + "group.getPrimaryVmInstanceUuid()", + "group.getSecondaryVmInstanceUuid()" + ], + 
"fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceApiInterceptor.java" + }, + { + "raw": "volume with index: %d, of pvm[uuid:%s] and svm[uuid:%s]\u0027s cache volume have different size, do not allowed to start", + "en_US": "volume with index: {0}, of pvm[uuid:{1}] and svm[uuid:{2}]\u0027s cache volume have different size, do not allowed to start", + "zh_CN": "", + "arguments": [ + "i", + "group.getPrimaryVmInstanceUuid()", + "group.getSecondaryVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceApiInterceptor.java" + }, { "raw": "image[uuid:%s] is still used by fault tolerance vm[uuid:%s]", "en_US": "image[uuid:{0}] is still used by fault tolerance vm[uuid:{1}]", @@ -9,6 +41,18 @@ ], "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceApiInterceptor.java" }, + { + "raw": "could not delete l3 network[uuid:%s]. Fault tolerance vm[%s] in states[%s, %s] still using it. Stop related fault tolerance vms before delete l3 network", + "en_US": "could not delete l3 network[uuid:{0}]. Fault tolerance vm[{1}] in states[{2}, {3}] still using it. 
Stop related fault tolerance vms before delete l3 network", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()", + "String.join(\",\", vmInstanceUuids)", + "VmInstanceState.Paused", + "VmInstanceState.Running" + ], + "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceApiInterceptor.java" + }, { "raw": "Can not fail-over vm[uuid:%s], please enable ft in GlobalConfig", "en_US": "Can not fail-over vm[uuid:{0}], please enable ft in GlobalConfig", @@ -47,6 +91,17 @@ ], "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceApiInterceptor.java" }, + { + "raw": "current operation[api:%s] is not supported when ft vm[uuid:%s, state:%s] is not stopped", + "en_US": "current operation[api:{0}] is not supported when ft vm[uuid:{1}, state:{2}] is not stopped", + "zh_CN": "", + "arguments": [ + "msg.getClass()", + "msg.getVmInstanceUuid()", + "state" + ], + "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceApiInterceptor.java" + }, { "raw": "Can not set vm level to %s, please enable ft in GlobalConfig", "en_US": "Can not set vm level to {0}, please enable ft in GlobalConfig", @@ -386,6 +441,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceGroupVmInstanceBase.java" }, + { + "raw": "unable to start the vm[uuid:%s]. It doesn\u0027t have any nic, please attach a nic and try again", + "en_US": "unable to start the vm[uuid:{0}]. 
It doesn\u0027t have any nic, please attach a nic and try again", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceGroupVmInstanceBase.java" + }, { "raw": "an other fault tolerance gc task is running, cancel the new task and wait return", "en_US": "an other fault tolerance gc task is running, cancel the new task and wait return", @@ -446,6 +510,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceManagerImpl.java" }, + { + "raw": "cannot found available ip from current ft network. Check whether global config[category:ft name:fault.tolerance.network.cidr] is correctly set, and confirm that host[uuid:%s] own ip address in the CIDR", + "en_US": "cannot found available ip from current ft network. Check whether global config[category:ft name:fault.tolerance.network.cidr] is correctly set, and confirm that host[uuid:{0}] own ip address in the CIDR", + "zh_CN": "", + "arguments": [ + "hostUuid" + ], + "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceManagerImpl.java" + }, { "raw": "can not start secondary vm, because primary vm is stopped", "en_US": "can not start secondary vm, because primary vm is stopped", @@ -480,6 +553,29 @@ ], "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceManagerImpl.java" }, + { + "raw": "cannot find the image[uuid:%s] in any connected backup storage attached to the zone[uuid:%s]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: %s, uuid:%s] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "en_US": "cannot find the image[uuid:{0}] in any connected backup storage attached to the zone[uuid:{1}]. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {2}, uuid:{3}] is in\\n2. 
if the backup storage is in connected status, if not, try reconnecting it", + "zh_CN": "", + "arguments": [ + "imageUuid", + "spec.getVmInventory().getZoneUuid()", + "spec.getVmInventory().getName()", + "spec.getVmInventory().getUuid()" + ], + "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceVmImageSelectBackupStorageFlow.java" + }, + { + "raw": "cannot find the image[uuid:%s] in any connected backup storage. check below:\\n1. if the backup storage is attached to the zone where the VM[name: %s, uuid:%s] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "en_US": "cannot find the image[uuid:{0}] in any connected backup storage. check below:\\n1. if the backup storage is attached to the zone where the VM[name: {1}, uuid:{2}] is in\\n2. if the backup storage is in connected status, if not, try reconnecting it", + "zh_CN": "", + "arguments": [ + "imageUuid", + "spec.getVmInventory().getName()", + "spec.getVmInventory().getUuid()" + ], + "fileName": "src/main/java/org/zstack/faulttolerance/FaultToleranceVmImageSelectBackupStorageFlow.java" + }, { "raw": "no backup storage attached to the zone[uuid:%s] contains the ISO[uuid:%s]", "en_US": "no backup storage attached to the zone[uuid:{0}] contains the ISO[uuid:{1}]", diff --git a/conf/i18n_json/i18n_flatNetworkProvider.json b/conf/i18n_json/i18n_flatNetworkProvider.json index bad33f0649f..9e2f81e30a6 100644 --- a/conf/i18n_json/i18n_flatNetworkProvider.json +++ b/conf/i18n_json/i18n_flatNetworkProvider.json @@ -126,6 +126,15 @@ ], "fileName": "src/main/java/org/zstack/network/service/flat/FlatDhcpBackend.java" }, + { + "raw": "could not delete ip address, because ip [%s] is dhcp server ip", + "en_US": "could not delete ip address, because ip [{0}] is dhcp server ip", + "zh_CN": "", + "arguments": [ + "vo.getIp()" + ], + "fileName": "src/main/java/org/zstack/network/service/flat/FlatDhcpBackend.java" + }, { "raw": "could not set dhcp v4 server ip, because there is no 
ipv4 range", "en_US": "could not set dhcp v4 server ip, because there is no ipv4 range", @@ -279,6 +288,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/network/service/flat/FlatEipApiInterceptor.java" }, + { + "raw": "L2Network where vip\u0027s L3Network based hasn\u0027t attached the cluster where vmNic[uuid:%s] located", + "en_US": "L2Network where vip\u0027s L3Network based hasn\u0027t attached the cluster where vmNic[uuid:{0}] located", + "zh_CN": "", + "arguments": [ + "vmNicUuid" + ], + "fileName": "src/main/java/org/zstack/network/service/flat/FlatEipApiInterceptor.java" + }, { "raw": "can not bound more than 1 %s eip to a vm nic[uuid:%s] of flat ", "en_US": "can not bound more than 1 {0} eip to a vm nic[uuid:{1}] of flat ", diff --git a/conf/i18n_json/i18n_guesttools.json b/conf/i18n_json/i18n_guesttools.json index 4fdf4c816d1..80ea342f515 100644 --- a/conf/i18n_json/i18n_guesttools.json +++ b/conf/i18n_json/i18n_guesttools.json @@ -399,7 +399,7 @@ { "raw": "failed to get guest tools state from prometheus: [metric\u003d%s]", "en_US": "failed to get guest tools state from prometheus: [metric\u003d{0}]", - "zh_CN": "无法从 Prometheus 获取 metric={0} 的 VM-Tools 状态", + "zh_CN": "无法从 Prometheus 获取 metric 等于 {0} 的 VM-Tools 状态", "arguments": [ "metricName" ], diff --git a/conf/i18n_json/i18n_hybrid.json b/conf/i18n_json/i18n_hybrid.json index 4da72888012..c7e83083de7 100644 --- a/conf/i18n_json/i18n_hybrid.json +++ b/conf/i18n_json/i18n_hybrid.json @@ -199,6 +199,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/ecs/CreateEcsInstancePublicFlow.java" }, + { + "raw": "This region [%s] cannot produce instance type [%s] now, please select another instance type or another region", + "en_US": "This region [{0}] cannot produce instance type [{1}] now, please select another instance type or another region", + "zh_CN": "", + "arguments": [ + "regionId", + "data1.get(\"type\")" + ], + "fileName": 
"src/main/java/org/zstack/aliyun/ecs/CreateEcsInstancePublicFlow.java" + }, { "raw": "no system disk found for ecs: [%s], ecs id is: [%s]", "en_US": "no system disk found for ecs: [{0}], ecs id is: [{1}]", @@ -388,6 +398,16 @@ ], "fileName": "src/main/java/org/zstack/aliyun/image/EcsImageApiInterceptor.java" }, + { + "raw": "valid platform:[%s] for aliyun image import, valid value are: [%s]", + "en_US": "valid platform:[{0}] for aliyun image import, valid value are: [{1}]", + "zh_CN": "", + "arguments": [ + "platform", + "EcsImageConstant.ECS_IMAGE_PLATFORM.getValid()" + ], + "fileName": "src/main/java/org/zstack/aliyun/image/EcsImageApiInterceptor.java" + }, { "raw": "image [%s] is not enable now", "en_US": "image [{0}] is not enable now", @@ -416,6 +436,16 @@ ], "fileName": "src/main/java/org/zstack/aliyun/network/AliyunNetworkServiceBase.java" }, + { + "raw": "Only ecs instances that are in the running and stopped status can detach the eip , but the ecs [%s] status is [%s] now ", + "en_US": "Only ecs instances that are in the running and stopped status can detach the eip , but the ecs [{0}] status is [{1}] now ", + "zh_CN": "", + "arguments": [ + "ecsVo.getUuid()", + "ecsVo.getEcsStatus()" + ], + "fileName": "src/main/java/org/zstack/aliyun/network/AliyunNetworkServiceBase.java" + }, { "raw": "virtual border: %s has been deleted", "en_US": "virtual border: {0} has been deleted", @@ -453,6 +483,16 @@ ], "fileName": "src/main/java/org/zstack/aliyun/network/AliyunNetworkServiceBase.java" }, + { + "raw": "Only esc instances that are in the running and stopped status can attach the eip , but the ecs [%s] status is [%s] now ", + "en_US": "Only esc instances that are in the running and stopped status can attach the eip , but the ecs [{0}] status is [{1}] now ", + "zh_CN": "", + "arguments": [ + "evo.getUuid()", + "evo.getEcsStatus()" + ], + "fileName": "src/main/java/org/zstack/aliyun/network/AliyunNetworkServiceBase.java" + }, { "raw": "Vbr: [%s] is in create 
connection progress, please wait...", "en_US": "Vbr: [{0}] is in create connection progress, please wait...", @@ -462,6 +502,28 @@ ], "fileName": "src/main/java/org/zstack/aliyun/network/connection/AliyunConnectionManagerImpl.java" }, + { + "raw": "custom cidr [%s] is already existed in vbr [%s], it is overlapped with target cidr [%s], please check and delete it first.", + "en_US": "custom cidr [{0}] is already existed in vbr [{1}], it is overlapped with target cidr [{2}], please check and delete it first.", + "zh_CN": "", + "arguments": [ + "tuple.get(0, String.class)", + "msg.getVbrUuid()", + "vpcCidr" + ], + "fileName": "src/main/java/org/zstack/aliyun/network/connection/CreateVbrRouteEntryFlow.java" + }, + { + "raw": "custom cidr [%s] is already existed in vrouter [%s], it is overlapped with target cidr [%s], please check and delete it first.", + "en_US": "custom cidr [{0}] is already existed in vrouter [{1}], it is overlapped with target cidr [{2}], please check and delete it first.", + "zh_CN": "", + "arguments": [ + "tuple.get(0, String.class)", + "vRouterUuid", + "cidr" + ], + "fileName": "src/main/java/org/zstack/aliyun/network/connection/CreateVpcRouteEntryFlow.java" + }, { "raw": "No Such VRouter nic found for l3network: %s", "en_US": "No Such VRouter nic found for l3network: {0}", @@ -655,6 +717,20 @@ "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/storage/disk/AliyunDiskApiInterceptor.java" }, + { + "raw": "Cannot set the disk\u0027s deleteWithInstance property to false when the category property of the disk is ephemeral", + "en_US": "Cannot set the disk\u0027s deleteWithInstance property to false when the category property of the disk is ephemeral", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/aliyun/storage/disk/AliyunDiskApiInterceptor.java" + }, + { + "raw": "Cannot set the disk\u0027s deleteWithInstance property to false when the category property of the disk is cloud and portable property is false", + 
"en_US": "Cannot set the disk\u0027s deleteWithInstance property to false when the category property of the disk is cloud and portable property is false", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/aliyun/storage/disk/AliyunDiskApiInterceptor.java" + }, { "raw": "The disk [%s] is not attach on any instance ", "en_US": "The disk [{0}] is not attach on any instance ", @@ -725,6 +801,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/aliyun/storage/disk/AliyunDiskApiInterceptor.java" }, + { + "raw": "The size and snapshot id in the request parameter must select one of the items to specify the size of the disk or create a disk using the snapshot.", + "en_US": "The size and snapshot id in the request parameter must select one of the items to specify the size of the disk or create a disk using the snapshot.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/aliyun/storage/disk/AliyunDiskApiInterceptor.java" + }, { "raw": "Not allowed create disk on root volume snapshot", "en_US": "Not allowed create disk on root volume snapshot", @@ -1164,6 +1247,15 @@ ], "fileName": "src/main/java/org/zstack/hybrid/network/HybridNetworkApiInterceptor.java" }, + { + "raw": "invalid CidrBlock: %s, which must subnet in \u002710.0.0.0/8\u0027, \u0027172.16.0.0/12\u0027, \u0027192.168.0.0/16\u0027", + "en_US": "invalid CidrBlock: {0}, which must subnet in \u002710.0.0.0/8\u0027, \u0027172.16.0.0/12\u0027, \u0027192.168.0.0/16\u0027", + "zh_CN": "", + "arguments": [ + "msg.getCidrBlock()" + ], + "fileName": "src/main/java/org/zstack/hybrid/network/HybridNetworkApiInterceptor.java" + }, { "raw": "no such virtual border router: %s", "en_US": "no such virtual border router: {0}", diff --git a/conf/i18n_json/i18n_iam2.json b/conf/i18n_json/i18n_iam2.json index 828b1ca0019..afe60d07cc4 100644 --- a/conf/i18n_json/i18n_iam2.json +++ b/conf/i18n_json/i18n_iam2.json @@ -143,6 +143,15 @@ ], "fileName": 
"src/main/java/org/zstack/iam2/IAM2ManagerImpl.java" }, + { + "raw": "Can not do operations, because current organization[uuid:%s] is staled, please enable it", + "en_US": "Can not do operations, because current organization[uuid:{0}] is staled, please enable it", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/iam2/IAM2OrganizationBase.java" + }, { "raw": "organization[uuid:%s] is parent of the organization[uuid:%s], cannot set it as a child organization", "en_US": "organization[uuid:{0}] is parent of the organization[uuid:{1}], cannot set it as a child organization", @@ -231,6 +240,15 @@ ], "fileName": "src/main/java/org/zstack/iam2/IAM2QuotaUpdateChecker.java" }, + { + "raw": "Can not do operations, because Current virtualID[uuid:%s] is staled, please enable it", + "en_US": "Can not do operations, because Current virtualID[uuid:{0}] is staled, please enable it", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/iam2/IAM2VirtualIDBase.java" + }, { "raw": "only admin and the virtual ID itself can do the update", "en_US": "only admin and the virtual ID itself can do the update", @@ -400,6 +418,16 @@ ], "fileName": "src/main/java/org/zstack/iam2/api/IAM2ApiInterceptor.java" }, + { + "raw": "parent organization[uuid:%s] cannot be a child organization[uuid:%s] of a childOrganization", + "en_US": "parent organization[uuid:{0}] cannot be a child organization[uuid:{1}] of a childOrganization", + "zh_CN": "", + "arguments": [ + "msg.getUuid()", + "msg.getParentUuid()" + ], + "fileName": "src/main/java/org/zstack/iam2/api/IAM2ApiInterceptor.java" + }, { "raw": "duplicate virtualID name[%s]", "en_US": "duplicate virtualID name[{0}]", @@ -630,6 +658,26 @@ ], "fileName": "src/main/java/org/zstack/iam2/attribute/project/RetirePolicy.java" }, + { + "raw": "virtual ID[uuid:%s] already has admin related attributes, can not add %s", + "en_US": "virtual ID[uuid:{0}] already has admin 
related attributes, can not add {1}", + "zh_CN": "", + "arguments": [ + "vid", + "attributeName" + ], + "fileName": "src/main/java/org/zstack/iam2/attribute/virtualid/AbstractAdminAttribute.java" + }, + { + "raw": "organiztion ID[uuid:%s] already has opoeration attributes, can not add %s", + "en_US": "organization ID[uuid:{0}] already has operation attributes, can not add {1}", + "zh_CN": "", + "arguments": [ + "inv.getValue()", + "IAM2_ORGANIZATION_OPERATION.getName()" + ], + "fileName": "src/main/java/org/zstack/iam2/attribute/virtualid/IAM2OrganizationOperator.java" + }, { "raw": "virtual id[uuid:%s] already has a project operator attribute", "en_US": "virtual id[uuid:{0}] already has a project operator attribute", diff --git a/conf/i18n_json/i18n_identity.json b/conf/i18n_json/i18n_identity.json index 0b12f18f79c..1779e062692 100644 --- a/conf/i18n_json/i18n_identity.json +++ b/conf/i18n_json/i18n_identity.json @@ -38,6 +38,17 @@ ], "fileName": "src/main/java/org/zstack/identity/AccountManagerImpl.java" }, + { + "raw": "Invalid ChangeResourceOwner operation.Original owner is the same as target owner.Current account is [uuid: %s].The resource target owner account[uuid: %s].The resource original owner account[uuid:%s].", + "en_US": "Invalid ChangeResourceOwner operation.Original owner is the same as target owner.Current account is [uuid: {0}].The resource target owner account[uuid: {1}].The resource original owner account[uuid:{2}].", + "zh_CN": "无效的 ChangeResourceOwner 操作。原始拥有者与目标拥有者相同。当前账户是[uuid: {0}],资源目标拥有者账户[uuid: {1}],资源原始拥有者账户[uuid:{2}]", + "arguments": [ + "currentAccountUuid", + "resourceTargetOwnerAccountUuid", + "resourceOriginalOwnerAccountUuid" + ], + "fileName": "src/main/java/org/zstack/identity/AccountManagerImpl.java" + }, { "raw": "cannot find the account[uuid:%s]", "en_US": "cannot find the account[uuid:{0}]", @@ -226,6 +237,20 @@ ], "fileName": "src/main/java/org/zstack/identity/DefaultAuthorizationBackend.java" }, + { + "raw": "quota
exceeding.The resource owner(or target resource owner) account[uuid: %s name: %s] exceeds a quota[name: %s, value: %s], Current used:%s, Request:%s. Please contact the administrator.", + "en_US": "quota exceeding. The resource owner(or target resource owner) account[uuid: {0} name: {1}] exceeds a quota[name: {2}, value: {3}], Current used:{4}, Request:{5}. Please contact the administrator.", + "zh_CN": "配额已超出。资源拥有者(或目标资源拥有者)帐户[uuid:{0},名称:{1}]超过了配额[名称:{2},值:{3}],当前使用:{4},请求:{5}。请与管理员联系。", + "arguments": [ + "quotaCompareInfo.resourceTargetOwnerAccountUuid", + "StringUtils.trimToEmpty(accountName)", + "quotaCompareInfo.quotaName", + "quotaCompareInfo.quotaValue", + "quotaCompareInfo.currentUsed", + "quotaCompareInfo.request" + ], + "fileName": "src/main/java/org/zstack/identity/QuotaUtil.java" + }, { "raw": "quota exceeding. The account[uuid: %s] exceeds a quota[name: %s, value: %s]. Please contact the administrator.", "en_US": "quota exceeding. The account[uuid: {0}] exceeds a quota[name: {1}, value: {2}]. Please contact the administrator.", @@ -237,6 +262,19 @@ ], "fileName": "src/main/java/org/zstack/identity/QuotaUtil.java" }, + { + "raw": "quota exceeding. The account[uuid: %s] exceeds a quota[name: %s, value: %s], Current used:%s, Request:%s. Please contact the administrator.", + "en_US": "quota exceeding. The account[uuid: {0}] exceeds a quota[name: {1}, value: {2}], Current used:{3}, Request:{4}. 
Please contact the administrator.", + "zh_CN": "配额已超出。帐户[uuid:{0}]超过了配额[名称:{1},值:{2}],当前使用:{3},请求:{4}。请与管理员联系。", + "arguments": [ + "currentAccountUuid", + "quotaName", + "quotaValue", + "currentUsed", + "request" + ], + "fileName": "src/main/java/org/zstack/identity/QuotaUtil.java" + }, { "raw": "Login sessions hit limit of max allowed concurrent login sessions", "en_US": "Login sessions hit limit of max allowed concurrent login sessions", diff --git a/conf/i18n_json/i18n_image.json b/conf/i18n_json/i18n_image.json index 71e3977e5d4..076ddb2cc7e 100644 --- a/conf/i18n_json/i18n_image.json +++ b/conf/i18n_json/i18n_image.json @@ -6,6 +6,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/image/AddImageLongJob.java" }, + { + "raw": "the backup storage[uuid:%s] is not in status of Connected, current status is %s", + "en_US": "the backup storage[uuid:{0}] is not in status of Connected, current status is {1}", + "zh_CN": "镜像服务器[uuid:{0}]未处于Connected状态,当前状态是{1}", + "arguments": [ + "backupStorageUuid", + "bsStatus" + ], + "fileName": "src/main/java/org/zstack/image/BackupStorageDeleteBitGC.java" + }, { "raw": "The aarch64 architecture does not support legacy.", "en_US": "The aarch64 architecture does not support legacy.", diff --git a/conf/i18n_json/i18n_kvm.json b/conf/i18n_json/i18n_kvm.json index f0679d4cb72..8271000d4fe 100644 --- a/conf/i18n_json/i18n_kvm.json +++ b/conf/i18n_json/i18n_kvm.json @@ -247,6 +247,26 @@ "arguments": [], "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" }, + { + "raw": "cannot do volume snapshot merge when vm[uuid:%s] is in state of %s. The operation is only allowed when vm is Running or Stopped", + "en_US": "cannot do volume snapshot merge when vm[uuid:{0}] is in state of {1}. 
The operation is only allowed when vm is Running or Stopped", + "zh_CN": "在虚拟机[uuid:{0}]处于状态{1}时无法进行快照合并", + "arguments": [ + "volume.getUuid()", + "state" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, + { + "raw": "live volume snapshot merge needs libvirt version greater than %s, current libvirt version is %s. Please stop vm and redo the operation or detach the volume if it\u0027s data volume", + "en_US": "live volume snapshot merge needs libvirt version greater than {0}, current libvirt version is {1}. Please stop vm and redo the operation or detach the volume if it\u0027s data volume", + "zh_CN": "快照合并需要libvirt版本大于{0},当前libvirt版本为{1},请停止虚拟机并重新执行操作,或者将数据卷从虚拟机中分离", + "arguments": [ + "KVMConstant.MIN_LIBVIRT_LIVE_BLOCK_COMMIT_VERSION", + "libvirtVersion" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, { "raw": "vm[uuid:%s] is not Running or Stopped, current state[%s]", "en_US": "vm[uuid:{0}] is not Running or Stopped, current state[{1}]", @@ -276,6 +296,45 @@ "arguments": [], "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" }, + { + "raw": "failed to update nic[vm:%s] on kvm host[uuid:%s, ip:%s],because %s", + "en_US": "failed to update nic[vm:{0}] on kvm host[uuid:{1}, ip:{2}],because {3}", + "zh_CN": "无法更新主机[uuid:{1}, ip:{2}] 虚拟机[vm:{0}]的网卡: {3}", + "arguments": [ + "msg.getVmInstanceUuid()", + "self.getUuid()", + "self.getManagementIp()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, + { + "raw": "failed to attach nic[uuid:%s, vm:%s] on kvm host[uuid:%s, ip:%s],because %s, please try again or delete device[%s] by yourself", + "en_US": "failed to attach nic[uuid:{0}, vm:{1}] on kvm host[uuid:{2}, ip:{3}],because {4}, please try again or delete device[{5}] by yourself", + "zh_CN": "无法将网卡[uuid:{0}, vm:{1}] 添加到主机[uuid:{2}, ip:{3}],因为:{4},请重新尝试或者自行删除设备{5}", + "arguments": [ + "msg.getNicInventory().getUuid()", + "msg.getNicInventory().getVmInstanceUuid()", + "self.getUuid()",
"self.getManagementIp()", + "ret.getError()", + "msg.getNicInventory().getInternalName()" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, + { + "raw": "failed to attach nic[uuid:%s, vm:%s] on kvm host[uuid:%s, ip:%s],because %s", + "en_US": "failed to attach nic[uuid:{0}, vm:{1}] on kvm host[uuid:{2}, ip:{3}],because {4}", + "zh_CN": "无法将网卡[uuid:{0}, vm:{1}] 添加到主机[uuid:{2}, ip:{3}],因为:{4}", + "arguments": [ + "msg.getNicInventory().getUuid()", + "msg.getNicInventory().getVmInstanceUuid()", + "self.getUuid()", + "self.getManagementIp()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, { "raw": "failed to detach data volume[uuid:%s, installPath:%s] from vm[uuid:%s, name:%s] on kvm host[uuid:%s, ip:%s], because %s", "en_US": "failed to detach data volume[uuid:{0}, installPath:{1}] from vm[uuid:{2}, name:{3}] on kvm host[uuid:{4}, ip:{5}], because {6}", @@ -301,6 +360,21 @@ ], "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" }, + { + "raw": "failed to attach data volume[uuid:%s, installPath:%s] to vm[uuid:%s, name:%s] on kvm host[uuid:%s, ip:%s], because %s", + "en_US": "failed to attach data volume[uuid:{0}, installPath:{1}] to vm[uuid:{2}, name:{3}] on kvm host[uuid:{4}, ip:{5}], because {6}", + "zh_CN": "无法在主机[uuid:{4}, ip:{5}]上挂载硬盘[uuid:{0}, installPath:{1}]到虚拟机[uuid:{2}, name:{3}],因为: {6}", + "arguments": [ + "vol.getUuid()", + "vol.getInstallPath()", + "vm.getUuid()", + "vm.getName()", + "getSelf().getUuid()", + "getSelf().getManagementIp()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, { "raw": "failed to destroy vm[uuid:%s name:%s] on kvm host[uuid:%s, ip:%s], because %s", "en_US": "failed to destroy vm[uuid:{0} name:{1}] on kvm host[uuid:{2}, ip:{3}], because {4}", @@ -493,6 +567,18 @@ "arguments": [], "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" }, + { + "raw": "detected abnormal status[host uuid change, expected: %s but: %s or agent version 
change, expected: %s but: %s] of kvmagent,it\u0027s mainly caused by kvmagent restarts behind zstack management server. Report this to ping task, it will issue a reconnect soon", + "en_US": "detected abnormal status[host uuid change, expected: {0} but: {1} or agent version change, expected: {2} but: {3}] of kvmagent,it\u0027s mainly caused by kvmagent restarts behind zstack management server. Report this to ping task, it will issue a reconnect soon", + "zh_CN": "检测到 KVM 代理异常状态[主机uuid改变,期望值:{0},实际值:{1} 或代理版本改变,期望值:{2},实际值:{3}],这通常是因为 KVM 代理重启导致管理节点无法获取到 KVM 代理状态。请等待主机重新完成后重试", + "arguments": [ + "self.getUuid()", + "ret.getHostUuid()", + "dbf.getDbVersion()", + "ret.getVersion()" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, { "raw": "unable to connect to kvm host[uuid:%s, ip:%s, url:%s], because %s", "en_US": "unable to connect to kvm host[uuid:{0}, ip:{1}, url:{2}], because {3}", @@ -515,7 +601,7 @@ { "raw": "connection error for KVM host[uuid:%s, ip:%s]", "en_US": "connection error for KVM host[uuid:{0}, ip:{1}]", - "zh_CN": "连接主机 {0} [ip={1}] 失败", + "zh_CN": "连接主机 {0} [ip:{1}] 失败", "arguments": [ "self.getUuid()", "self.getManagementIp()" @@ -581,6 +667,30 @@ ], "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" }, + { + "raw": "unable to connect to KVM[ip:%s, username:%s, sshPort:%d] to check the management node connectivity,please check if username/password is wrong; %s", + "en_US": "unable to connect to KVM[ip:{0}, username:{1}, sshPort:{2}] to check the management node connectivity,please check if username/password is wrong; {3}", + "zh_CN": "无法连接主机[ip:{0}, 用户名:{1}, ssh端口:{2}] 做管理节点检查,请检查用户名密码是否正确;{3}", + "arguments": [ + "self.getManagementIp()", + "getSelf().getUsername()", + "getSelf().getPort()", + "ret.getExitErrorMessage()" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, + { + "raw": "the KVM host[ip:%s] cannot access the management node\u0027s callback url. 
It seems that the KVM host cannot reach the management IP[%s]. %s %s", + "en_US": "the KVM host[ip:{0}] cannot access the management node\u0027s callback url. It seems that the KVM host cannot reach the management IP[{1}]. {2} {3}", + "zh_CN": "主机[ip:{0}]无法访问管理节点的回调地址。似乎是主机无法访问管理节点的IP[{1}],{2} {3}", + "arguments": [ + "self.getManagementIp()", + "restf.getHostName()", + "ret.getStderr()", + "ret.getExitErrorMessage()" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHost.java" + }, { "raw": "unable to check whether the host is taken over", "en_US": "unable to check whether the host is taken over", @@ -889,6 +999,16 @@ ], "fileName": "src/main/java/org/zstack/kvm/KVMHostFactory.java" }, + { + "raw": "vm current state[%s], modify bus type requires the vm state[%s]", + "en_US": "vm current state[{0}], modify bus type requires the vm state[{1}]", + "zh_CN": "VM 当前状态[{0}],修改 BUS 类型需要VM状态[{1}]", + "arguments": [ + "vm.getState()", + "VmInstanceState.Stopped" + ], + "fileName": "src/main/java/org/zstack/kvm/KVMHostFactory.java" + }, { "raw": "vm do not support having both SCSI and Virtio-SCSI bus type volumes simultaneously.", "en_US": "vm do not support having both SCSI and Virtio-SCSI bus type volumes simultaneously.", @@ -1014,6 +1134,13 @@ ], "fileName": "src/main/java/org/zstack/kvm/KVMSecurityGroupBackend.java" }, + { + "raw": "Failed to start vm, because can not disable vm.cpu.hypervisor.feature with vm.cpuMode none", + "en_US": "Failed to start vm, because can not disable vm.cpu.hypervisor.feature with vm.cpuMode none", + "zh_CN": "无法启动虚拟机:当 vm.cpuMode 为 none 时无法禁用 vm.cpu.hypervisor.feature", + "arguments": [], + "fileName": "src/main/java/org/zstack/kvm/KvmVmHardwareVerifyExtensionPoint.java" + }, { "raw": "cannot get vmUuid from msg %s", "en_US": "cannot get vmUuid from msg {0}", diff --git a/conf/i18n_json/i18n_loadBalancer.json b/conf/i18n_json/i18n_loadBalancer.json index b0b88c0b1ae..89376f3b2f4 100644 --- a/conf/i18n_json/i18n_loadBalancer.json +++
b/conf/i18n_json/i18n_loadBalancer.json @@ -753,6 +753,17 @@ ], "fileName": "src/main/java/org/zstack/network/service/lb/LoadBalancerApiInterceptor.java" }, + { + "raw": "could not add vm nic [uuid:%s] to server group [uuid:%s] because listener [uuid:%s] attached this server group already the nic to be added", + "en_US": "could not add vm nic [uuid:{0}] to server group [uuid:{1}] because listener [uuid:{2}] attached this server group already the nic to be added", + "zh_CN": "", + "arguments": [ + "vmNicUuids", + "msg.getServerGroupUuid()", + "listenerVO.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/lb/LoadBalancerApiInterceptor.java" + }, { "raw": "could not add backend server ip to serverGroup [uuid:%s], because ip [ipAddress:%s] is repeated", "en_US": "could not add backend server ip to serverGroup [uuid:{0}], because ip [ipAddress:{1}] is repeated", @@ -1011,6 +1022,17 @@ ], "fileName": "src/main/java/org/zstack/network/service/lb/LoadBalancerBase.java" }, + { + "raw": "service provider type mismatching. The load balancer[uuid:%s] is provided by the service provider[type:%s], but new service provider is [type: %s]", + "en_US": "service provider type mismatching. 
The load balancer[uuid:{0}] is provided by the service provider[type:{1}], but new service provider is [type: {2}]", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getProviderType()", + "providerType" + ], + "fileName": "src/main/java/org/zstack/network/service/lb/LoadBalancerBase.java" + }, { "raw": "there is listener with same port [%s] and same load balancer [uuid:%s]", "en_US": "there is listener with same port [{0}] and same load balancer [uuid:{1}]", diff --git a/conf/i18n_json/i18n_localstorage.json b/conf/i18n_json/i18n_localstorage.json index 3839c8faa44..1a33d89880f 100644 --- a/conf/i18n_json/i18n_localstorage.json +++ b/conf/i18n_json/i18n_localstorage.json @@ -17,6 +17,22 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageAllocatorFactory.java" }, + { + "raw": "To create volume on the local primary storage, you must specify the host that the volume is going to be created using the system tag [%s]", + "en_US": "To create volume on the local primary storage, you must specify the host that the volume is going to be created using the system tag [{0}]", + "zh_CN": "创建本地主存储硬盘时,必须指定硬盘要创建的虚拟机,请使用系统标签[{0}]指定虚拟机", + "arguments": [ + "LocalStorageSystemTags.DEST_HOST_FOR_CREATING_DATA_VOLUME.getTagFormat()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageAllocatorFactory.java" + }, + { + "raw": "invalid uri, correct example is file://$URL;hostUuid://$HOSTUUID or volume://$VOLUMEUUID or volumeSnapshotReuse://$SNAPSHOTUUID", + "en_US": "invalid uri, correct example is file://$URL;hostUuid://$HOSTUUID or volume://$VOLUMEUUID or volumeSnapshotReuse://$SNAPSHOTUUID", + "zh_CN": "URI格式错误,正确格式为file://$URL;hostUuid://$HOSTUUID 或 volume://$VOLUMEUUID 或 volumeSnapshotReuse://$SNAPSHOTUUID", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageAllocatorFactory.java" + }, { "raw": "the volume[uuid:%s] is not on any local primary storage", "en_US": "the 
volume[uuid:{0}] is not on any local primary storage", @@ -54,6 +70,29 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageApiInterceptor.java" }, + { + "raw": "the dest host[uuid:%s] doesn\u0027t belong to the local primary storage[uuid:%s] where the volume[uuid:%s] locates", + "en_US": "the dest host[uuid:{0}] doesn\u0027t belong to the local primary storage[uuid:{1}] where the volume[uuid:{2}] locates", + "zh_CN": "目标主机[uuid:{0}]不属于本地主存储[uuid:{1}],未找到硬盘[uuid:{2}]", + "arguments": [ + "msg.getDestHostUuid()", + "ref.getPrimaryStorageUuid()", + "msg.getVolumeUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageApiInterceptor.java" + }, + { + "raw": "the dest host[uuid:%s] doesn\u0027t have enough physical capacity due to the threshold of primary storage[uuid:%s] is %f but available physical capacity is %d", + "en_US": "the dest host[uuid:{0}] doesn\u0027t have enough physical capacity due to the threshold of primary storage[uuid:{1}] is {2} but available physical capacity is {3}", + "zh_CN": "目标主机[uuid:{0}]没有足够的物理容量,因为主存储[uuid:{1}]的阈值是{2},可用物理容量是{3}", + "arguments": [ + "msg.getDestHostUuid()", + "msg.getPrimaryStorageUuid()", + "physicalThreshold", + "refVO.getAvailablePhysicalCapacity()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageApiInterceptor.java" + }, { "raw": "the volume[uuid:%s] is not in status of Ready, cannot migrate it", "en_US": "the volume[uuid:{0}] is not in status of Ready, cannot migrate it", @@ -63,6 +102,49 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageApiInterceptor.java" }, + { + "raw": "the data volume[uuid:%s, name: %s] is still attached to the VM[uuid:%s]. Please detach it before migration", + "en_US": "the data volume[uuid:{0}, name: {1}] is still attached to the VM[uuid:{2}]. 
Please detach it before migration", + "zh_CN": "数据云盘[uuid:{0}, name: {1}]还挂载在虚拟机[uuid:{2}]上,请先卸载该硬盘", + "arguments": [ + "vol.getUuid()", + "vol.getName()", + "vol.getVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageApiInterceptor.java" + }, + { + "raw": "the volume[uuid:%s] is the root volume of the vm[uuid:%s]. Currently the vm is in state of %s, please stop it before migration", + "en_US": "the volume[uuid:{0}] is the root volume of the vm[uuid:{1}]. Currently the vm is in state of {2}, please stop it before migration", + "zh_CN": "硬盘[uuid:{0}]是虚拟机[uuid:{1}]的根盘,虚拟机当前状态是{2},请先停止该虚拟机", + "arguments": [ + "vol.getUuid()", + "vol.getVmInstanceUuid()", + "vmstate" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageApiInterceptor.java" + }, + { + "raw": "the volume[uuid:%s] is the root volume of the vm[uuid:%s]. Currently the vm still has %s data volumes attached, please detach them before migration", + "en_US": "the volume[uuid:{0}] is the root volume of the vm[uuid:{1}]. Currently the vm still has {2} data volumes attached, please detach them before migration", + "zh_CN": "硬盘[uuid:{0}]是虚拟机[uuid:{1}]的根盘,虚拟机当前有{2}个数据盘挂载,请先卸载这些硬盘", + "arguments": [ + "vol.getUuid()", + "vol.getVmInstanceUuid()", + "count" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageApiInterceptor.java" + }, + { + "raw": "the volume[uuid:%s] is the root volume of the vm[uuid:%s]. Currently the vm still has ISO attached, please detach it before migration", + "en_US": "the volume[uuid:{0}] is the root volume of the vm[uuid:{1}]. 
Currently the vm still has ISO attached, please detach it before migration", + "zh_CN": "硬盘[uuid:{0}]是虚拟机[uuid:{1}]的根盘,虚拟机当前有ISO挂载,迁移前请先卸载该ISO", + "arguments": [ + "vol.getUuid()", + "vol.getVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageApiInterceptor.java" + }, { "raw": "The clusterUuid of vm[uuid:%s] cannot be null when migrate the root volume[uuid:%s, name: %s]", "en_US": "The clusterUuid of vm[uuid:{0}] cannot be null when migrate the root volume[uuid:{1}, name: {2}]", @@ -117,6 +199,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageBase.java" }, + { + "raw": "volume[uuid:%s] is not on the local storage anymore,it may have been deleted", + "en_US": "volume[uuid:{0}] is not on the local storage anymore,it may have been deleted", + "zh_CN": "硬盘[uuid:{0}]不在本地存储上,可能已被删除", + "arguments": [ + "msg.getVolumeUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageBase.java" + }, { "raw": "local primary storage[uuid:%s] doesn\u0027t have the host[uuid:%s]", "en_US": "local primary storage[uuid:{0}] doesn\u0027t have the host[uuid:{1}]", @@ -127,6 +218,17 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageBase.java" }, + { + "raw": "failed to download image[uuid:%s] to all hosts in the local storage[uuid:%s]. %s", + "en_US": "failed to download image[uuid:{0}] to all hosts in the local storage[uuid:{1}]. 
{2}", + "zh_CN": "尝试将镜像[uuid:{0}]下载到本地存储[uuid:{1}]的所有主机中失败。", + "arguments": [ + "msg.getImage().getUuid()", + "self.getUuid()", + "JSONObjectUtil.toJsonString(ret.errorCodes)" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageBase.java" + }, { "raw": "unable to create the data volume[uuid: %s] on a local primary storage[uuid:%s], because the hostUuid is not specified.", "en_US": "unable to create the data volume[uuid: {0}] on a local primary storage[uuid:{1}], because the hostUuid is not specified.", @@ -195,6 +297,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageBase.java" }, + { + "raw": "volume[uuid:%s] has reference volume[%s], can not change volume type before flatten them and their descendants", + "en_US": "volume[uuid:{0}] has reference volume[{1}], can not change volume type before flatten them and their descendants", + "zh_CN": "硬盘[uuid:{0}]有引用硬盘[uuid:{1}],不能改变硬盘类型,请先展开该硬盘和它的子硬盘", + "arguments": [ + "volumeUuid", + "infos.toString()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageBase.java" + }, { "raw": "There is no LocalStorage primary storage[state\u003d%s,status\u003d%s] on the cluster[%s], when the cluster mounts multiple primary storage, the system uses the local primary storage by default. Check the state/status of primary storage and make sure they have been attached to clusters", "en_US": "There is no LocalStorage primary storage[state\u003d{0},status\u003d{1}] on the cluster[{2}], when the cluster mounts multiple primary storage, the system uses the local primary storage by default. 
Check the state/status of primary storage and make sure they have been attached to clusters", @@ -206,6 +318,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageDefaultAllocateCapacityFlow.java" }, + { + "raw": "the type of primary storage[uuid:%s] chosen is not local storage, check if the resource can be created on other storage when cluster has attached local primary storage", + "en_US": "the type of primary storage[uuid:{0}] chosen is not local storage, check if the resource can be created on other storage when cluster has attached local primary storage", + "zh_CN": "选择的主存储[uuid:{0}]的类型不是本地存储,请检查当集群有绑定了本地存储的时候,资源是否能被创建在非本地存储中", + "arguments": [ + "psUuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageDefaultAllocateCapacityFlow.java" + }, { "raw": "The cluster mounts multiple primary storage[%s(%s), other non-LocalStorage primary storage], primaryStorageUuidForDataVolume cannot be specified %s", "en_US": "The cluster mounts multiple primary storage[{0}({1}), other non-LocalStorage primary storage], primaryStorageUuidForDataVolume cannot be specified {2}", @@ -271,6 +392,79 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" }, + { + "raw": "cannot attach the data volume[uuid:%s] to the vm[uuid:%s]. Both vm\u0027s root volume and the data volume are on local primary storage, but they are on different hosts. The root volume[uuid:%s] is on the host[uuid:%s] but the data volume[uuid: %s] is on the host[uuid: %s]", + "en_US": "cannot attach the data volume[uuid:{0}] to the vm[uuid:{1}]. Both vm\u0027s root volume and the data volume are on local primary storage, but they are on different hosts. 
The root volume[uuid:{2}] is on the host[uuid:{3}] but the data volume[uuid: {4}] is on the host[uuid: {5}]", + "zh_CN": "不能加载硬盘[uuid:{0}]到虚拟机[uuid:{1}]上,硬盘和虚拟机的硬盘都位于本地存储,但是它们位于不同的主机上。虚拟机的硬盘位于主机[uuid:{3}]上,硬盘位于主机[uuid:{5}]上", + "arguments": [ + "volume.getUuid()", + "vm.getUuid()", + "vm.getRootVolumeUuid()", + "rootHost", + "volume.getUuid()", + "dataHost" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" + }, + { + "raw": "the data volume[name:%s, uuid:%s] is on the local storage[uuid:%s]; however,the host on which the data volume is has been deleted. Unable to recover this volume", + "en_US": "the data volume[name:{0}, uuid:{1}] is on the local storage[uuid:{2}]; however,the host on which the data volume is has been deleted. Unable to recover this volume", + "zh_CN": "不能恢复硬盘[名称:{0},uuid:{1}],硬盘位于本地存储[uuid:{2}]上,但是该硬盘所在的主机已被删除", + "arguments": [ + "vol.getName()", + "vol.getUuid()", + "vol.getPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" + }, + { + "raw": "unable to recover the vm[uuid:%s, name:%s]. The vm\u0027s root volume is on the local storage[uuid:%s]; however, the host on which the root volume is has been deleted", + "en_US": "unable to recover the vm[uuid:{0}, name:{1}]. The vm\u0027s root volume is on the local storage[uuid:{2}]; however, the host on which the root volume is has been deleted", + "zh_CN": "不能恢复虚拟机[uuid:{0},名称:{1}],虚拟机的硬盘位于本地存储[uuid:{2}]上,但是该硬盘所在的主机已被删除", + "arguments": [ + "vm.getUuid()", + "vm.getName()", + "psuuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" + }, + { + "raw": "unable to live migrate vm[uuid:%s] with data volumes on local storage. Need detach all data volumes first.", + "en_US": "unable to live migrate vm[uuid:{0}] with data volumes on local storage. 
Need detach all data volumes first.", + "zh_CN": "不能对虚拟机[uuid:{0}]进行热迁移,该虚拟机有硬盘位于本地存储上", + "arguments": [ + "vm.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" + }, + { + "raw": "unable to live migrate vm[uuid:%s] with local storage. Only linux guest is supported. Current platform is [%s]", + "en_US": "unable to live migrate vm[uuid:{0}] with local storage. Only linux guest is supported. Current platform is [{1}]", + "zh_CN": "不能对虚拟机[uuid:{0}]进行热迁移,该虚拟机有硬盘在本地存储上。仅 Linux 虚拟机支持热迁移, 当前平台为 [{1}]", + "arguments": [ + "vm.getUuid()", + "vm.getPlatform()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" + }, + { + "raw": "unable to live migrate vm[uuid:%s] with ISO on local storage. Need detach all ISO first.", + "en_US": "unable to live migrate vm[uuid:{0}] with ISO on local storage. Need detach all ISO first.", + "zh_CN": "不能对虚拟机[uuid:{0}]进行热迁移,该虚拟机有ISO位于本地存储上,请先卸载ISO", + "arguments": [ + "vm.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" + }, + { + "raw": "To create data volume on the local primary storage, you must specify the host that the data volume is going to be created using the system tag [%s]", + "en_US": "To create data volume on the local primary storage, you must specify the host that the data volume is going to be created using the system tag [{0}]", + "zh_CN": "创建本地存储上的数据盘时,必须指定创建该数据盘的主机,请使用系统标签[{0}]指定创建该数据盘的主机", + "arguments": [ + "LocalStorageSystemTags.DEST_HOST_FOR_CREATING_DATA_VOLUME.getTagFormat()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" + }, { "raw": "the host[uuid:%s] doesn\u0027t belong to the local primary storage[uuid:%s]", "en_US": "the host[uuid:{0}] doesn\u0027t belong to the local primary storage[uuid:{1}]", @@ -292,6 +486,17 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageFactory.java" }, + { + "raw": 
"the image[uuid:%s, name: %s] is not available to download on any backup storage:\\n1. check if image is in status of Deleted\\n2. check if the backup storage on which the image is shown as Ready is attached to the zone[uuid:%s]", + "en_US": "the image[uuid:{0}, name: {1}] is not available to download on any backup storage:\\n1. check if image is in status of Deleted\\n2. check if the backup storage on which the image is shown as Ready is attached to the zone[uuid:{2}]", + "zh_CN": "镜像[uuid:{0},名称:{1}]在任意备份存储上不可用:\\n1. 检查镜像是否处于删除状态\\n2. 检查镜像在哪个备份存储上处于就绪状态,该备份存储是否已挂载到该区域[uuid:{2}]", + "arguments": [ + "ispec.getInventory().getUuid()", + "ispec.getInventory().getName()", + "self.getZoneUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java" + }, { "raw": "root image has been deleted, cannot reimage now", "en_US": "root image has been deleted, cannot reimage now", @@ -413,6 +618,47 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageKvmSftpBackupStorageMediatorImpl.java" }, + { + "raw": "the required host[uuid:%s] cannot satisfy conditions[state: %s, status: %s, size \u003e %s bytes], or doesn\u0027t belong to a local primary storage satisfying conditions[state: %s, status: %s], or its cluster doesn\u0027t attach to any local primary storage", + "en_US": "the required host[uuid:{0}] cannot satisfy conditions[state: {1}, status: {2}, size \u003e {3} bytes], or doesn\u0027t belong to a local primary storage satisfying conditions[state: {4}, status: {5}], or its cluster doesn\u0027t attach to any local primary storage", + "zh_CN": "不能满足条件[状态:{1}, 状态:{2}, 大小等于{3}字节]的主机[uuid:{0}],或者该主机不属于满足条件的本地存储[状态:{4}, 状态:{5}],或者该主机所属的集群没有绑定任何本地存储", + "arguments": [ + "spec.getRequiredHostUuid()", + "HostState.Enabled", + "HostStatus.Connected", + "spec.getSize()", + "PrimaryStorageState.Enabled", + "PrimaryStorageStatus.Connected" + ], + "fileName": 
"src/main/java/org/zstack/storage/primary/local/LocalStorageMainAllocatorFlow.java" + }, + { + "raw": "no local primary storage in zone[uuid:%s] can satisfy conditions[state: %s, status: %s] or contain hosts satisfying conditions[state: %s, status: %s, size \u003e %s bytes]", + "en_US": "no local primary storage in zone[uuid:{0}] can satisfy conditions[state: {1}, status: {2}] or contain hosts satisfying conditions[state: {3}, status: {4}, size \u003e {5} bytes]", + "zh_CN": "没有满足条件的本地存储[状态:{1}, 状态:{2}]或者该本地存储中不能满足条件的主机[状态:{3}, 状态:{4}, 大小等于{5}字节]", + "arguments": [ + "spec.getRequiredZoneUuid()", + "PrimaryStorageState.Enabled", + "PrimaryStorageStatus.Connected", + "HostState.Enabled", + "HostStatus.Connected", + "spec.getSize()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageMainAllocatorFlow.java" + }, + { + "raw": "no local primary storage can satisfy conditions[state: %s, status: %s] or contain hosts satisfying conditions[state: %s, status: %s, size \u003e %s bytes]", + "en_US": "no local primary storage can satisfy conditions[state: {0}, status: {1}] or contain hosts satisfying conditions[state: {2}, status: {3}, size \u003e {4} bytes]", + "zh_CN": "没有满足条件的本地存储[状态:{0}, 状态:{1}]或者该本地存储中不能满足条件主机[状态:{2}, 状态:{3}, 大小等于{4}字节]", + "arguments": [ + "PrimaryStorageState.Enabled", + "PrimaryStorageStatus.Connected", + "HostState.Enabled", + "HostStatus.Connected", + "spec.getSize()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageMainAllocatorFlow.java" + }, { "raw": "{the physical capacity usage of the host[uuid:%s] has exceeded the threshold[%s]}", "en_US": "{the physical capacity usage of the host[uuid:{0}] has exceeded the threshold[{1}]}", @@ -440,6 +686,18 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageUtils.java" }, + { + "raw": "host[uuid: %s] of local primary storage[uuid: %s] doesn\u0027t have enough capacity[current: %s bytes, needed: %s]", + "en_US": "host[uuid: 
{0}] of local primary storage[uuid: {1}] doesn\u0027t have enough capacity[current: {2} bytes, needed: {3}]", + "zh_CN": "主机[uuid:{0}]上的本地存储[uuid:{1}]没有足够的容量[当前:{2}字节,所需:{3}]", + "arguments": [ + "hostUuid", + "self.getUuid()", + "ref.getAvailableCapacity()", + "size" + ], + "fileName": "src/main/java/org/zstack/storage/primary/local/LocalStorageUtils.java" + }, { "raw": "cannot find any host which has resource[uuid:%s]", "en_US": "cannot find any host which has resource[uuid:{0}]", diff --git a/conf/i18n_json/i18n_mevoco.json b/conf/i18n_json/i18n_mevoco.json index ed4f5876d5b..c4a8b3d1537 100644 --- a/conf/i18n_json/i18n_mevoco.json +++ b/conf/i18n_json/i18n_mevoco.json @@ -1,4 +1,24 @@ [ + { + "raw": "More than one BackupStorage on the same host identified by hostname. There has been a SftpBackupStorage [hostname:%s] existing. The BackupStorage type to be added is %s. ", + "en_US": "More than one BackupStorage on the same host identified by hostname. There has been a SftpBackupStorage [hostname:{0}] existing. The BackupStorage type to be added is {1}. ", + "zh_CN": "多个 BackupStorage 在相同的主机上被识别,已经存在一个 SftpBackupStorage[hostname:{0}]。要添加的 BackupStorage 类型是{1}。", + "arguments": [ + "hostname", + "newBS" + ], + "fileName": "src/main/java/org/zstack/apimediator/ApiValidator.java" + }, + { + "raw": "More than one BackupStorage on the same host identified by hostname. There has been an ImageStoreBackupStorage [hostname:%s] existing. The BackupStorage type to be added is %s. ", + "en_US": "More than one BackupStorage on the same host identified by hostname. There has been an ImageStoreBackupStorage [hostname:{0}] existing. The BackupStorage type to be added is {1}. 
", + "zh_CN": "多个 BackupStorage 在相同主机上被识别,已经存在一个 ImageStoreBackupStorage[hostname:{0}]。要添加的 BackupStorage 类型是{1}", + "arguments": [ + "hostname", + "newBS" + ], + "fileName": "src/main/java/org/zstack/apimediator/ApiValidator.java" + }, { "raw": "VM [uuid: %s] has already been added to affinityGroup [uuid: %s]", "en_US": "VM [uuid: {0}] has already been added to affinityGroup [uuid: {1}]", @@ -179,6 +199,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/bonding/HostNetworkBondingUtils.java" }, + { + "raw": "cannot assign xmit_hash_policy [%s] for mode [%s], because only mode 802.3ad support specifying different xmit_hash_policys", + "en_US": "cannot assign xmit_hash_policy [{0}] for mode [{1}], because only mode 802.3ad support specifying different xmit_hash_policys", + "zh_CN": "", + "arguments": [ + "xmitHashPolicy", + "mode" + ], + "fileName": "src/main/java/org/zstack/compute/bonding/HostNetworkBondingUtils.java" + }, { "raw": "xmit_hash_policy for mode [%s] should not be null", "en_US": "xmit_hash_policy for mode [{0}] should not be null", @@ -232,6 +262,24 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/bonding/HostNetworkBondingUtils.java" }, + { + "raw": "bonding card can not have occupied interfaces, which was already been used by bonding[uuid:%s]", + "en_US": "bonding card can not have occupied interfaces, which was already been used by bonding[uuid:{0}]", + "zh_CN": "绑定卡不能有被占用的接口,该接口已被绑定[uuid:{0}]使用", + "arguments": [ + "bondingUuid" + ], + "fileName": "src/main/java/org/zstack/compute/bonding/HostNetworkBondingUtils.java" + }, + { + "raw": "bonding card can not have interfaces that has been used as a network bridge, which was already been used by host[%s]", + "en_US": "bonding card can not have interfaces that has been used as a network bridge, which was already been used by host[{0}]", + "zh_CN": "绑定卡不能有被使用的网络桥接接口,该接口已被主机[uuid:{0}]使用", + "arguments": [ + "hostUuid" + ], + "fileName": 
"src/main/java/org/zstack/compute/bonding/HostNetworkBondingUtils.java" + }, { "raw": "bonding card can not have interfaces that has been pass-through", "en_US": "bonding card can not have interfaces that has been pass-through", @@ -248,6 +296,15 @@ ], "fileName": "src/main/java/org/zstack/compute/bonding/HostNetworkBondingUtils.java" }, + { + "raw": "bonding card can not have [%s] interfaces,it must be the number between[1~8]", + "en_US": "bonding card can not have [{0}] interfaces,it must be the number between[1~8]", + "zh_CN": "绑定卡不能有[{0}]个接口,该接口数量必须在[1~8]之间", + "arguments": [ + "size" + ], + "fileName": "src/main/java/org/zstack/compute/bonding/HostNetworkBondingUtils.java" + }, { "raw": "[%s] bonding card can not have [%s] interfaces, it must be the number between[1~8]", "en_US": "[{0}] bonding card can not have [{1}] interfaces, it must be the number between[1~8]", @@ -540,6 +597,15 @@ ], "fileName": "src/main/java/org/zstack/compute/host/MevocoHostBase.java" }, + { + "raw": "failed to allocate pci device on host[uuid:%s], because there are not enough pci devices available", + "en_US": "failed to allocate pci device on host[uuid:{0}], because there are not enough pci devices available", + "zh_CN": "无法在主机[uuid:{0}]上分配 PCI 设备,因为没有足够的 PCI 设备可用", + "arguments": [ + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/host/MevocoHostBase.java" + }, { "raw": "networkInterface[name:%s] of host[uuid:%s] can not find", "en_US": "networkInterface[name:{0}] of host[uuid:{1}] can not find", @@ -550,6 +616,16 @@ ], "fileName": "src/main/java/org/zstack/compute/host/MevocoHostBase.java" }, + { + "raw": "only support do live snapshot on vm state[%s], but vm is on [%s] state", + "en_US": "only support do live snapshot on vm state[{0}], but vm is on [{1}] state", + "zh_CN": "仅支持对处于[{0}]状态的虚拟机进行快照,但虚拟机处于[{1}]状态", + "arguments": [ + "vmInstanceVO.getUuid()", + "vmInstanceVO.getState()" + ], + "fileName": 
"src/main/java/org/zstack/compute/host/MevocoHostBase.java" + }, { "raw": "kvm host[uuid:%s, name:%s, ip:%s] doesn\u0027t not support live snapshot. please stop vm[uuid:%s] and try again", "en_US": "kvm host[uuid:{0}, name:{1}, ip:{2}] doesn\u0027t not support live snapshot. please stop vm[uuid:{3}] and try again", @@ -1011,6 +1087,26 @@ ], "fileName": "src/main/java/org/zstack/compute/vdpa/KVMRealizeL2NoVlanOvsDpdkBackend.java" }, + { + "raw": "could not ungenerate pci device[uuid:%s], becausethere are another l2[uuid:%s] use the physical network interface attached to cluster", + "en_US": "could not ungenerate pci device[uuid:{0}], becausethere are another l2[uuid:{1}] use the physical network interface attached to cluster", + "zh_CN": "无法取消生成PCI设备[uuid:{0}],因为该物理网络接口被其他二层网络[uuid:{1}]使用", + "arguments": [ + "msg.getPciDeviceUuid()", + "l2Uuids" + ], + "fileName": "src/main/java/org/zstack/compute/vdpa/VmVdpaNicApiInterceptor.java" + }, + { + "raw": "could not generate pci device[uuid:%s], becausethere are another l2[uuid:%s] use the physical network interface attached to cluster", + "en_US": "could not generate pci device[uuid:{0}], because there are another l2[uuid:{1}] use the physical network interface attached to cluster", + "zh_CN": "无法生成PCI设备[uuid:{0}],因为该物理网络接口被其他二层网络[uuid:{1}]使用", + "arguments": [ + "msg.getPciDeviceUuid()", + "l2Uuids" + ], + "fileName": "src/main/java/org/zstack/compute/vdpa/VmVdpaNicApiInterceptor.java" + }, { "raw": "only %s support vdpa", "en_US": "only {0} support vdpa", @@ -1115,6 +1211,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/vm/ChangeVmPasswordFlow.java" }, + { + "raw": "not account preference found, send change password cmd to the host!", + "en_US": "not account preference found, send change password cmd to the host!", + "zh_CN": "没有发现 account preference,不能发送更改密码的指令到这个主机上", + "arguments": [], + "fileName": "src/main/java/org/zstack/compute/vm/ChangeVmPasswordFlow.java" + }, { "raw": "fail to attach 
virtio driver because read md5 of file[%s] fail in mn[uuid:%s]: file not found on classpath", "en_US": "fail to attach virtio driver because read md5 of file[{0}] fail in mn[uuid:{1}]: file not found on classpath", @@ -1196,6 +1299,39 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/MevocoVmInstanceBase.java" }, + { + "raw": "vm[uuid:%s] is in cluster[uuid:%s], but there is no available host in the cluster, cannot change image for the vm", + "en_US": "vm[uuid:{0}] is in cluster[uuid:{1}], but there is no available host in the cluster, cannot change image for the vm", + "zh_CN": "VM[uuid:{0}]位于集群[uuid:{1}]中,但集群中没有可用的主机,无法更改虚拟机镜像", + "arguments": [ + "self.getUuid()", + "self.getClusterUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/MevocoVmInstanceBase.java" + }, + { + "raw": "unable to allocate hosts, no host meets the following conditions: clusterUuid\u003d%s hostUuid\u003d%s cpu\u003d%d memoryCapacity\u003d%d L3NetworkUuids\u003d%s", + "en_US": "unable to allocate hosts, no host meets the following conditions: clusterUuid\u003d{0} hostUuid\u003d{1} cpu\u003d{2} memoryCapacity\u003d{3} L3NetworkUuids\u003d{4}", + "zh_CN": "无法分配主机,无满足以下条件的主机:集群uuid\u003d{0} 主机uuid\u003d{1} CPU\u003d{2} 内存容量\u003d{3} L3NetworkUuids\u003d{4}", + "arguments": [ + "amsg.getClusterUuids()", + "amsg.getHostUuid()", + "amsg.getCpuCapacity()", + "amsg.getMemoryCapacity()", + "amsg.getL3NetworkUuids()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/MevocoVmInstanceBase.java" + }, + { + "raw": "can not find backup storage, unable to commit volume snapshot[psUuid:%s] as image, destination required PS uuid:%s", + "en_US": "can not find backup storage, unable to commit volume snapshot[psUuid:{0}] as image, destination required PS uuid:{1}", + "zh_CN": "无法找到数据存储,无法将卷快照[psUuid:{0}]作为镜像,目标需要的主存储 uuid:{1}", + "arguments": [ + "vol.getPrimaryStorageUuid()", + "finalRequiredPsUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vm/MevocoVmInstanceBase.java" + }, { 
"raw": "direction must be set to in or out", "en_US": "direction must be set to in or out", @@ -1237,6 +1373,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/vm/MevocoVmInstanceBase.java" }, + { + "raw": "not dest host found in db by uuid: %s, can\u0027t send change password cmd to the host!", + "en_US": "not dest host found in db by uuid: {0}, can\u0027t send change password cmd to the host!", + "zh_CN": "无法在数据库中找到该主机的uuid:{0},无法向该主机发送修改密码命令", + "arguments": [ + "amsg.getVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/MevocoVmInstanceBase.java" + }, { "raw": "state is not correct while change password.", "en_US": "state is not correct while change password.", @@ -1254,6 +1399,15 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmMevocoApiInterceptor.java" }, + { + "raw": "failed to convert vm to templated vm, because the vm has scheduled jobs [%s]", + "en_US": "failed to convert vm to templated vm, because the vm has scheduled jobs [{0}]", + "zh_CN": "转换虚拟机为模板虚拟机失败,该虚拟机有计划任务[{0}]", + "arguments": [ + "jobsBuilder.toString()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmMevocoApiInterceptor.java" + }, { "raw": "The number of data volumes exceeds the limit[num: %s], please reduce the number of data volumes during vm creation.", "en_US": "The number of data volumes exceeds the limit[num: {0}], please reduce the number of data volumes during vm creation.", @@ -1273,6 +1427,13 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmMevocoApiInterceptor.java" }, + { + "raw": "can not set primaryStorageUuidForRootVolume or primaryStorageUuidForDataVolume or rootVolumeSystemTags or dataVolumeSystemTags when diskAOs is not empty", + "en_US": "can not set primaryStorageUuidForRootVolume or primaryStorageUuidForDataVolume or rootVolumeSystemTags or dataVolumeSystemTags when diskAOs is not empty", + "zh_CN": "不能在 diskAOs 不为空的情况下设置 primaryStorageUuidForRootVolume 或 primaryStorageUuidForDataVolume 或 
rootVolumeSystemTags 或 dataVolumeSystemTags", + "arguments": [], + "fileName": "src/main/java/org/zstack/compute/vm/VmMevocoApiInterceptor.java" + }, { "raw": "invalid json format, causes: %s", "en_US": "invalid json format, causes: {0}", @@ -1289,6 +1450,19 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/vm/VmMevocoApiInterceptor.java" }, + { + "raw": "there are not enough capacity for full vm clone to vm[uuid: %s], volumes[uuid: %s] on primary storage[uuid: %s] required: %s bytes, current available capacity is %s bytes", + "en_US": "there are not enough capacity for full vm clone to vm[uuid: {0}], volumes[uuid: {1}] on primary storage[uuid: {2}] required: {3} bytes, current available capacity is {4} bytes", + "zh_CN": "虚拟机克隆失败,因为存储空间不足,虚拟机[uuid:{0}]的云盘[uuid:{1}]在存储[uuid:{2}]上所需空间为 {3} 字节,当前可用空间为 {4} 字节", + "arguments": [ + "vmInstanceVO.getUuid()", + "volumeVOS.stream().map(VolumeVO::getUuid).collect(Collectors.toList())", + "primaryStorageUuid", + "(totalCapacity - snapshotsCapacity) * size", + "primaryStorageVO.getCapacity().getAvailableCapacity()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmMevocoApiInterceptor.java" + }, { "raw": "The nic [%s%s] is not mounted on the VM", "en_US": "The nic [{0}{1}] is not mounted on the VM", @@ -1448,6 +1622,16 @@ ], "fileName": "src/main/java/org/zstack/compute/vm/VmMevocoApiInterceptor.java" }, + { + "raw": "the cache of a templated vmInstance[uuid:%s] can contain only one or zero snapshot groups. the current number of snapshot groups is %d.", + "en_US": "the cache of a templated vmInstance[uuid:{0}] can contain only one or zero snapshot groups. 
the current number of snapshot groups is {1}.", + "zh_CN": "模板虚拟机[uuid:{0}] 的快照组只能包含一个或零个快照组,当前快照组数量为 {1}", + "arguments": [ + "msg.getTemplatedVmInstanceUuid()", + "groups.size()" + ], + "fileName": "src/main/java/org/zstack/compute/vm/VmMevocoApiInterceptor.java" + }, { "raw": "the templated vmInstance[uuid:%s] is not exist", "en_US": "the templated vmInstance[uuid:{0}] is not exist", @@ -1587,6 +1771,18 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" }, + { + "raw": "unmatched zone detected, host[uuid: %s, zone uuid: %s]\u0027s zone is different from host sheduling rule group[uuid: %s, zone uuid: %s]", + "en_US": "unmatched zone detected, host[uuid: {0}, zone uuid: {1}]\u0027s zone is different from host sheduling rule group[uuid: {2}, zone uuid: {3}]", + "zh_CN": "未匹配的区被检测到,主机 [uuid:{0},区域uuid:{1}] 的区域与主机调度组 [uuid:{2},区域uuid:{3}] 的区域不同", + "arguments": [ + "hostVO.getUuid()", + "hostVO.getZoneUuid()", + "hostGroup.getUuid()", + "hostGroup.getZoneUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, { "raw": "vm[uuid:%s] already attached to vm scheduling group[uuid:%s]", "en_US": "vm[uuid:{0}] already attached to vm scheduling group[uuid:{1}]", @@ -1597,6 +1793,18 @@ ], "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" }, + { + "raw": "unmatched zone detected, vm[uuid: %s, zone uuid: %s]\u0027s zone is different from vm sheduling rule group[uuid: %s, zone uuid: %s]", + "en_US": "unmatched zone detected, vm[uuid: {0}, zone uuid: {1}]\u0027s zone is different from vm sheduling rule group[uuid: {2}, zone uuid: {3}]", + "zh_CN": "未匹配的区被检测到,虚拟机 [uuid:{0},区域uuid:{1}] 的区域与虚拟机调度组 [uuid:{2},区域uuid:{3}] 的区域不同", + "arguments": [ + "vm.getUuid()", + "vm.getZoneUuid()", + "groupVO.getUuid()", + "groupVO.getZoneUuid()" + ], + "fileName": 
"src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, { "raw": "vm can change its vm scheduling group only in state [%s,%s], but vm is in state [%s]", "en_US": "vm can change its vm scheduling group only in state [{0},{1}], but vm is in state [{2}]", @@ -1629,6 +1837,97 @@ "arguments": [], "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" }, + { + "raw": "the vm scheduling group has already had a vms antiaffinity from hosts scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. you cannot attach a vm antiaffinity from Each Other scheduling rule to the group", + "en_US": "the vm scheduling group has already had a vms antiaffinity from hosts scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. you cannot attach a vm antiaffinity from Each Other scheduling rule to the group", + "zh_CN": "该虚拟机调度组已绑定虚拟机反亲和性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group has already had a vm antiaffinity from each other scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. you cannot attach a vms antiaffinity from Hosts scheduling policy to the group.", + "en_US": "the vm scheduling group has already had a vm antiaffinity from each other scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. 
you cannot attach a vms antiaffinity from Hosts scheduling policy to the group.", + "zh_CN": "该虚拟机调度组已绑定虚拟机反亲和性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group[uuid:%s] has already had a vm antiaffinity from each other scheduling rule attached. attaching another one is not allowed.", + "en_US": "the vm scheduling group[uuid:{0}] has already had a vm antiaffinity from each other scheduling rule attached. attaching another one is not allowed.", + "zh_CN": "该虚拟机调度组已绑定虚拟机反亲和性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [ + "vmGroupUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group[uuid:%s] has already had a vm affinitive to each other scheduling rule attached. Attaching a vm antiaffinity from each other scheduling rule is not allowed.", + "en_US": "the vm scheduling group[uuid:{0}] has already had a vm affinitive to each other scheduling rule attached. Attaching a vm antiaffinity from each other scheduling rule is not allowed.", + "zh_CN": "该虚拟机调度组 {0} 已绑定虚拟机亲和性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [ + "vmGroupUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group[uuid:%s] has already had a vm affinitive to each other scheduling rule attached.attaching another one is not allowed.", + "en_US": "the vm scheduling group[uuid:{0}] has already had a vm affinitive to each other scheduling rule attached.attaching another one is not allowed.", + "zh_CN": "该虚拟机调度组 {0} 已绑定虚拟机亲和性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [ + "vmGroupUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group[uuid:%s] has already had a vm exclusive from each other scheduling rule attached. 
Attaching a vm affinitive to each other scheduling policy is not allowed.", + "en_US": "the vm scheduling group[uuid:{0}] has already had a vm exclusive from each other scheduling rule attached. Attaching a vm affinitive to each other scheduling policy is not allowed.", + "zh_CN": "该虚拟机调度组 {0} 已绑定虚拟机独占性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [ + "vmGroupUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group[uuid:%s] has already had a executed exclusive vm or affinitive vm scheduling policy attached. you cannot attach either of the two scheduling policies that require execution to the group again", + "en_US": "the vm scheduling group[uuid:{0}] has already had a executed exclusive vm or affinitive vm scheduling policy attached. you cannot attach either of the two scheduling policies that require execution to the group again", + "zh_CN": "该虚拟机调度组 {0} 已绑定虚拟机执行调度策略,不可绑定其它虚拟机调度策略", + "arguments": [], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group[uuid:%s] has already had a vms affinitive to hosts scheduling rule attached. you cannot attach another one to the group again.", + "en_US": "the vm scheduling group[uuid:{0}] has already had a vms affinitive to hosts scheduling rule attached. you cannot attach another one to the group again.", + "zh_CN": "该虚拟机调度组 {0} 已绑定虚拟机亲和性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [ + "vmGroupUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group[uuid:%s] has already had a vm antiaffinity from host scheduling rule attached. you cannot attach a vms affinitive to host scheduling rule to the group.", + "en_US": "the vm scheduling group[uuid:{0}] has already had a vm antiaffinity from host scheduling rule attached. 
you cannot attach a vms affinitive to host scheduling rule to the group.", + "zh_CN": "该虚拟机调度组 {0} 已绑定虚拟机反亲和性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [ + "vmGroupUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group[uuid:%s] has already had a vm affinitive to hosts scheduling rule attached. you cannot attach a vm antiaffinity from hosts scheduling rule to the group.", + "en_US": "the vm scheduling group[uuid:{0}] has already had a vm affinitive to hosts scheduling rule attached. you cannot attach a vm antiaffinity from hosts scheduling rule to the group.", + "zh_CN": "该虚拟机调度组 {0} 已绑定虚拟机亲和性调度策略,不可绑定其它虚拟机调度策略", + "arguments": [ + "vmGroupUuid" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, + { + "raw": "the vm scheduling group has already had a vm antiaffinity from each other scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. you cannot attach a vms affinitive to hosts scheduling policy to the group.", + "en_US": "the vm scheduling group has already had a vm antiaffinity from each other scheduling rule attached. the number of hosts available for the vm in the scheduling group to run is less than that of the vm in the group. 
you cannot attach a vms affinitive to hosts scheduling policy to the group.", + "zh_CN": "该虚拟机调度组已绑定虚拟机反亲和性调度策略,无法绑定其它虚拟机调度策略", + "arguments": [], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleApiInterceptor.java" + }, { "raw": "can not satisfied vm scheduling rule group conditions", "en_US": "can not satisfied vm scheduling rule group conditions", @@ -1647,6 +1946,17 @@ ], "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleGroupBase.java" }, + { + "raw": "vm[uuid:%s] is now running on host[uuid:%s],which does not comply with the scheduling rule associated with vm scheduling group[uuid:%s].", + "en_US": "vm[uuid:{0}] is now running on host[uuid:{1}],which does not comply with the scheduling rule associated with vm scheduling group[uuid:{2}].", + "zh_CN": "虚拟机[uuid:{0}]现在运行在主机[uuid:{1}],该主机不符合虚拟机调度组[uuid:{2}]关联的调度规则。", + "arguments": [ + "vmInv.getUuid()", + "hostUuid", + "refVO.getVmGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleGroupBase.java" + }, { "raw": "hostGroup[uuid:%s] is no host", "en_US": "hostGroup[uuid:{0}] is no host", @@ -1656,6 +1966,57 @@ ], "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleGroupBase.java" }, + { + "raw": "vm[uuid:%s] is now running on host[uuid:%s], which does not comply with the scheduling rule[%s] associated with vm scheduling group[uuid:%s].", + "en_US": "vm[uuid:{0}] is now running on host[uuid:{1}], which does not comply with the scheduling rule[{2}] associated with vm scheduling group[uuid:{3}].", + "zh_CN": "虚拟机[uuid:{0}]现在运行在主机[uuid:{1}],该主机不符合虚拟机调度组[uuid:{3}]关联的调度规则[{2}]", + "arguments": [ + "msg.getVmUuid()", + "hostUuid", + "VMSchedulingRuleType.AFFINITY.toString()", + "msg.getVmGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleGroupBase.java" + }, + { + "raw": "vm[uuid:%s] is now running on host[uuid:%s],which does not comply with the scheduling 
rule[%s] associated with vm scheduling group[uuid:%s].", + "en_US": "vm[uuid:{0}] is now running on host[uuid:{1}],which does not comply with the scheduling rule[{2}] associated with vm scheduling group[uuid:{3}].", + "zh_CN": "虚拟机[uuid:{0}]现在运行在主机[uuid:{1}],该主机不符合虚拟机调度组[uuid:{3}]关联的调度规则[{2}]", + "arguments": [ + "msg.getVmUuid()", + "hostUuid", + "VMSchedulingRuleType.ANTIAFFINITY.toString()", + "msg.getVmGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleGroupBase.java" + }, + { + "raw": "cannot find the host scheduling group[uuid:%s], it may have been deleted", + "en_US": "cannot find the host scheduling group[uuid:{0}], it may have been deleted", + "zh_CN": "无法找到主机调度组[uuid:{0}],可能已被删除", + "arguments": [ + "msg.getHostGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleManagerImpl.java" + }, + { + "raw": "cannot find the vm scheduling rule[uuid:%s], it may have been deleted", + "en_US": "cannot find the vm scheduling rule[uuid:{0}], it may have been deleted", + "zh_CN": "无法找到虚拟机调度规则[uuid:{0}],可能已被删除", + "arguments": [ + "msg.getVmSchedulingRuleUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleManagerImpl.java" + }, + { + "raw": "cannot find the vm scheduling group[uuid:%s], it may have been deleted", + "en_US": "cannot find the vm scheduling group[uuid:{0}], it may have been deleted", + "zh_CN": "无法找到虚拟机调度组[uuid:{0}],可能已被删除", + "arguments": [ + "msg.getVmSchedulingRuleGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/compute/vmscheduling/VmSchedulingRuleManagerImpl.java" + }, { "raw": "failed to parse API message: can not parse encryption param with type %s", "en_US": "failed to parse API message: can not parse encryption param with type {0}", @@ -1992,6 +2353,13 @@ ], "fileName": "src/main/java/org/zstack/image/ImageMevocoApiInterceptor.java" }, + { + "raw": "Parse license error,\\n1. 
check your private key and application code is correct\\n2. check your license is not corrupted\\n3. use zstack-ctl clear_license to clear your licenses and try to reinstall\\n", + "en_US": "Parse license error,\\n1. check your private key and application code is correct\\n2. check your license is not corrupted\\n3. use zstack-ctl clear_license to clear your licenses and try to reinstall\\n", + "zh_CN": "解析许可证错误,\\n1. 检查您的私钥和应用程序代码是否正确\\n2. 检查您的许可证是否损坏\\n3. 使用zstack-ctl clear_license来清除您的许可证并重新安装", + "arguments": [], + "fileName": "src/main/java/org/zstack/license/LicenseChecker.java" + }, { "raw": "the licenseRequestCode is illegal", "en_US": "the licenseRequestCode is illegal", @@ -2205,6 +2573,25 @@ ], "fileName": "src/main/java/org/zstack/license/PlatformLicense.java" }, + { + "raw": "Hybrid platform license is already in use. You should remove Hybrid platform license and hybird add-ons license at the same timeby DeleteLicenseAction with Hybrid license UUID[uuid\u003d%s]", + "en_US": "Hybrid platform license is already in use. You should remove Hybrid platform license and hybird add-ons license at the same timeby DeleteLicenseAction with Hybrid license UUID[uuid\u003d{0}]", + "zh_CN": "混合平台许可证已使用。您应该同时删除混合平台许可证和混合模块许可证", + "arguments": [ + "licenseUuid" + ], + "fileName": "src/main/java/org/zstack/license/PlatformLicense.java" + }, + { + "raw": "The system\u0027s thumbprint has changed.\\n Detailed errors: %s.\\n If you are setting up a new system or changing an existing system, please follow the commands below:\\n 1. run `zstack-ctl clear_license` to clear and backup old license files\\n or delete the license file on path %s 2. contact sales@zstack.io to apply a license;\\n 3. run `zstack-ctl install_license -f path/to/your/license`;\\n 4. 
run `zstack-ctl start` to start management node.\\n", + "en_US": "The system\u0027s thumbprint has changed.\\n Detailed errors: {0}.\\n If you are setting up a new system or changing an existing system, please follow the commands below:\\n 1. run `zstack-ctl clear_license` to clear and backup old license files\\n or delete the license file on path {1} 2. contact sales@zstack.io to apply a license;\\n 3. run `zstack-ctl install_license -f path/to/your/license`;\\n 4. run `zstack-ctl start` to start management node.\\n", + "zh_CN": "系统的指纹已改变,详情如下 {0}。如果是新系统或正在改变一个现有的系统,请按照以下命令进行操作:\\n 1. 运行 zstack-ctl clear_license 来清除并备份旧的许可证文件或者删除路径 {1} 的许可证文件 2. 联系 sales@zstack.io 申请许可证; 3. 执行 `zstack-ctl install_license -f path/to/your/license`; 4. 执行 `zstack-ctl start` 启动管理节点", + "arguments": [ + "errors", + "info.getPath() !\u003d null ? info.getPath() : \"\"" + ], + "fileName": "src/main/java/org/zstack/license/PlatformLicense.java" + }, { "raw": "Unexpected thumbprint", "en_US": "Unexpected thumbprint", @@ -2388,6 +2775,17 @@ ], "fileName": "src/main/java/org/zstack/mevoco/MevocoManagerImpl.java" }, + { + "raw": "the host[uuid:%s]\u0027s operating system %s %s is too old, the QEMU doesn\u0027t support QoS of network or disk IO. Please choose another instance offering with no QoS configuration", + "en_US": "the host[uuid:{0}]\u0027s operating system {1} {2} is too old, the QEMU doesn\u0027t support QoS of network or disk IO. 
Please choose another instance offering with no QoS configuration", + "zh_CN": "宿主机[uuid:{0}]的操作系统{1} {2}过旧,QEMU不支持网络或磁盘IO的QoS。请选择另一个实例规格,该实例规格没有QoS配置", + "arguments": [ + "hostUuid", + "os.distribution", + "os.version" + ], + "fileName": "src/main/java/org/zstack/mevoco/MevocoManagerImpl.java" + }, { "raw": "invalid value[%s], it must be a double greater than 0", "en_US": "invalid value[{0}], it must be a double greater than 0", @@ -2511,6 +2909,16 @@ ], "fileName": "src/main/java/org/zstack/mevoco/MevocoManagerImpl.java" }, + { + "raw": "failed to create cache for templated vmInstance %s, because %s", + "en_US": "failed to create cache for templated vmInstance {0}, because {1}", + "zh_CN": "创建模板虚拟机 {0} 的缓存失败,因为 {1}", + "arguments": [ + "chunk.msg.getTemplatedVmInstanceUuid()", + "inv.getError().getDetails()" + ], + "fileName": "src/main/java/org/zstack/mevoco/MevocoManagerImpl.java" + }, { "raw": "all management node update factory mode failed, details: %s", "en_US": "all management node update factory mode failed, details: {0}", @@ -2637,6 +3045,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/mevoco/MevocoManagerImpl.java" }, + { + "raw": "shareable volume(s)[uuid: %s] attached, not support to group snapshot.", + "en_US": "shareable volume(s)[uuid: {0}] attached, not support to group snapshot.", + "zh_CN": "共享云盘 [uuid:{0}] 已经挂载,不支持快照组", + "arguments": [ + "sharedVolUuids" + ], + "fileName": "src/main/java/org/zstack/mevoco/MevocoManagerImpl.java" + }, { "raw": "the license has been expired, please renew it", "en_US": "the license has been expired, please renew it", @@ -2666,6 +3083,35 @@ ], "fileName": "src/main/java/org/zstack/mevoco/MevocoVolumeBase.java" }, + { + "raw": "can not found in used snapshot tree of volume[uuid: %s]. Maybe no snapshot chain need to validate.", + "en_US": "can not found in used snapshot tree of volume[uuid: {0}]. 
Maybe no snapshot chain need to validate.", + "zh_CN": "无法在云盘 [uuid:{0}] 的已使用快照树中找到快照。可能快照链不需要验证", + "arguments": [ + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/mevoco/MevocoVolumeBase.java" + }, + { + "raw": "can not found latest snapshot from tree[uuid: %s] of volume[uuid: %s]. Maybe no snapshot chain need to validate.", + "en_US": "can not found latest snapshot from tree[uuid: {0}] of volume[uuid: {1}]. Maybe no snapshot chain need to validate.", + "zh_CN": "无法在云盘 [uuid:{1}] 的快照树 [uuid: {0}] 中找到最新快照。可能快照链不需要验证", + "arguments": [ + "currentTreeUuid", + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/mevoco/MevocoVolumeBase.java" + }, + { + "raw": "can not found snapshots from tree[uuid: %s] of volume[uuid: %s]. Maybe no snapshot chain need to validate.", + "en_US": "can not found snapshots from tree[uuid: {0}] of volume[uuid: {1}]. Maybe no snapshot chain need to validate.", + "zh_CN": "无法在云盘 [uuid:{1}] 的快照树 [uuid: {0}] 中找到快照。可能快照链不需要验证", + "arguments": [ + "currentTreeUuid", + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/mevoco/MevocoVolumeBase.java" + }, { "raw": "Unexpectedly, VM[uuid:%s] is not running any more, please try again later", "en_US": "Unexpectedly, VM[uuid:{0}] is not running any more, please try again later", @@ -2740,6 +3186,18 @@ ], "fileName": "src/main/java/org/zstack/mevoco/MevocoVolumeBase.java" }, + { + "raw": "Cannot delete vm\u0027s volume qos on host %s, because the current vm is in state of %s, but support expect states are [%s, %s]", + "en_US": "Cannot delete vm\u0027s volume qos on host {0}, because the current vm is in state of {1}, but support expect states are [{2}, {3}]", + "zh_CN": "不能在HOST [uuid:{0}] 上删除VM [uuid:{1}] 的云盘QoS,因为VM当前状态为 {2},但支持的期望状态为[{3}, {4}]", + "arguments": [ + "ivo.getHostUuid()", + "ivo.getState()", + "VmInstanceState.Running.toString()", + "VmInstanceState.Stopped.toString()" + ], + "fileName": "src/main/java/org/zstack/mevoco/MevocoVolumeBase.java" + }, { 
"raw": "SetVolumeQosMsg version 1 is deprecated, please use version 2", "en_US": "SetVolumeQosMsg version 1 is deprecated, please use version 2", @@ -2907,6 +3365,15 @@ ], "fileName": "src/main/java/org/zstack/mevoco/MevocoVolumeFactoryImpl.java" }, + { + "raw": "ZStack has been paused, reject all API which are not read only. If you really want to call it and known the consequence, add \u0027%s\u0027 into systemTags.", + "en_US": "ZStack has been paused, reject all API which are not read only. If you really want to call it and known the consequence, add \u0027{0}\u0027 into systemTags.", + "zh_CN": "ZStack已暂停,拒绝所有非只读API。如果您确定要调用它,并已了解其结果的含义,请将\u0027{0}\u0027添加到系统标签中。", + "arguments": [ + "MevocoSystemTags.CONFIRM_CALL_API.getTagFormat()" + ], + "fileName": "src/main/java/org/zstack/mevoco/PauseWorldApiInterceptor.java" + }, { "raw": "the current version of license does not support modifying this global config [name:%s]", "en_US": "the current version of license does not support modifying this global config [name:{0}]", @@ -2997,6 +3464,15 @@ ], "fileName": "src/main/java/org/zstack/monitoring/MonitorManagerImpl.java" }, + { + "raw": "A resource[name:{resourceName}, uuid:{resourceUuid}, type:{resourceType}]\u0027s monitoring trigger[uuid:{triggerUuid}] changes status to {triggerStatus}", + "en_US": "A resource[name:{resourceName}, uuid:{resourceUuid}, type:{resourceType}]\u0027s monitoring trigger[uuid:{triggerUuid}] changes status to {triggerStatus}", + "zh_CN": "一个资源[name:{resourceName}, uuid:{resourceUuid}, type:{resourceType}]\u0027s monitoring 触发器 [uuid:{triggerUuid}] 状态改为 {triggerStatus}", + "arguments": [ + "args" + ], + "fileName": "src/main/java/org/zstack/monitoring/items/AlertText.java" + }, { "raw": "\\n\u003d\u003d\u003d BELOW ARE DETAILS OF THE PREVIOUS ALERT \u003d\u003d\u003d", "en_US": "\\n\u003d\u003d\u003d BELOW ARE DETAILS OF THE PREVIOUS ALERT \u003d\u003d\u003d", @@ -3079,6 +3555,22 @@ ], "fileName": 
"src/main/java/org/zstack/monitoring/prometheus/AlertRuleWriter.java" }, + { + "raw": "ALERT:\\n resource[name: %s, uuid: %s, type: %s]\\nevent: %s %s %s\\ncurrent value: %s\\nduration: %s seconds\\n", + "en_US": "ALERT:\\n resource[name: {0}, uuid: {1}, type: {2}]\\nevent: {3} {4} {5}\\ncurrent value: {6}\\nduration: {7} seconds\\n", + "zh_CN": "告警:\\n 资源[name: {0}, uuid: {1}, type: {2}]\\nevent: {3} {4} {5}\\n当前值: {6}\\n持续时间: {7} 秒\\n", + "arguments": [ + "resourceName", + "resourceUuid", + "toI18nString(resourceType)", + "itemName", + "toI18nString(expression.getOperator())", + "expression.getConstant()", + "value", + "tvo.getDuration()" + ], + "fileName": "src/main/java/org/zstack/monitoring/prometheus/PrometheusAlert.java" + }, { "raw": "the relativeTime[%s] is invalid, it must be in format of, for example, 10s, 1h", "en_US": "the relativeTime[{0}] is invalid, it must be in format of, for example, 10s, 1h", @@ -3648,6 +4140,15 @@ ], "fileName": "src/main/java/org/zstack/pciDevice/PciDeviceApiInterceptor.java" }, + { + "raw": "pci device[uuid:%s] cannot be virtualized into mdevs, make sure it\u0027s enabled and un-attached", + "en_US": "pci device[uuid:{0}] cannot be virtualized into mdevs, make sure it\u0027s enabled and un-attached", + "zh_CN": "PCI设备 [uuid:{0}] 无法被 MDEV 虚拟化切分,请确保设备已启用且未挂载", + "arguments": [ + "msg.getPciDeviceUuid()" + ], + "fileName": "src/main/java/org/zstack/pciDevice/PciDeviceApiInterceptor.java" + }, { "raw": "pci device[uuid:%s] cannot be virtualized by mdev spec[uuid:%s]", "en_US": "pci device[uuid:{0}] cannot be virtualized by mdev spec[uuid:{1}]", @@ -3713,6 +4214,18 @@ ], "fileName": "src/main/java/org/zstack/pciDevice/PciDeviceApiInterceptor.java" }, + { + "raw": "specified pci devices are not on the same host: pci device[uuid: %s] on host[uuid: %s] while pci device[uuid: %s] on host[uuid: %s]", + "en_US": "specified pci devices are not on the same host: pci device[uuid: {0}] on host[uuid: {1}] while pci device[uuid: {2}] on 
host[uuid: {3}]", + "zh_CN": "指定的PCI设备不是在同一个主机上: PCI 设备[uuid:{0}] 在主机[uuid:{1}]上,而PCI 设备[uuid:{2}] 在主机[uuid:{3}]上", + "arguments": [ + "vo.getUuid()", + "vo.getHostUuid()", + "attachedPciUuid", + "dstHostUuid" + ], + "fileName": "src/main/java/org/zstack/pciDevice/PciDeviceFilterFlow.java" + }, { "raw": "the PCI devices[uuid:%s] is not on this host", "en_US": "the PCI devices[uuid:{0}] is not on this host", @@ -3873,6 +4386,16 @@ ], "fileName": "src/main/java/org/zstack/pciDevice/PciDeviceReserveFlow.java" }, + { + "raw": "The host [%s] has failed to enter the maintenance, The vm [%s] cannot migrate automatically because it contains the PCI device", + "en_US": "The host [{0}] has failed to enter the maintenance, The vm [{1}] cannot migrate automatically because it contains the PCI device", + "zh_CN": "主机[uuid:{0}]已进入维护模式,虚拟机[uuid:{1}]无法自动迁移,因为它包含PCI设备", + "arguments": [ + "inventory.getUuid()", + "hasPciVmUuids.toString()" + ], + "fileName": "src/main/java/org/zstack/pciDevice/PciHostChangeStateExtension.java" + }, { "raw": "don\u0027t set rom version if has no rom content", "en_US": "don\u0027t set rom version if has no rom content", @@ -4243,6 +4766,18 @@ "arguments": [], "fileName": "src/main/java/org/zstack/pciDevice/virtual/vfio_mdev/MdevDeviceFilterFlow.java" }, + { + "raw": "specified mdev devices not on same host: mdev device[uuid: %s] on host[uuid: %s] while mdev device[uuid: %s] on host[uuid: %s]", + "en_US": "specified mdev devices not on same host: mdev device[uuid: {0}] on host[uuid: {1}] while mdev device[uuid: {2}] on host[uuid: {3}]", + "zh_CN": "指定的 MDEV 设备不在同一个主机上:MDEV 设备[uuid: {0}]在主机[uuid: {1}]上,而 MDEV 设备[uuid: {2}]在主机[uuid: {3}]上", + "arguments": [ + "mdev.getUuid()", + "mdev.getHostUuid()", + "attachedMdevUuid", + "dstHostUuid" + ], + "fileName": "src/main/java/org/zstack/pciDevice/virtual/vfio_mdev/MdevDeviceFilterFlow.java" + }, { "raw": "the Mdev devices[uuid:%s] is not on this host", "en_US": "the Mdev devices[uuid:{0}] is not on this 
host", @@ -4266,6 +4801,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/pciDevice/virtual/vfio_mdev/MdevDeviceFilterFlow.java" }, + { + "raw": "The host [%s] has failed to enter the maintenance, because vm[%s] has mdev devices attached and cannot migrate automatically", + "en_US": "The host [{0}] has failed to enter the maintenance, because vm[{1}] has mdev devices attached and cannot migrate automatically", + "zh_CN": "主机[uuid:{0}]无法进入维护模式,因为虚拟机[uuid:{1}]有MDEV设备已连接,无法自动迁移", + "arguments": [ + "inventory.getUuid()", + "hasMdevVmUuids.toString()" + ], + "fileName": "src/main/java/org/zstack/pciDevice/virtual/vfio_mdev/MdevDeviceHostChangeStateExtension.java" + }, { "raw": "failed to find enough mdev device of spec[uuid:%s] in dest host[uuid:%s] for vm[uuid:%s]", "en_US": "failed to find enough mdev device of spec[uuid:{0}] in dest host[uuid:{1}] for vm[uuid:{2}]", @@ -4563,6 +5108,15 @@ ], "fileName": "src/main/java/org/zstack/scheduler/VolumeSnapshotGroupJobFactory.java" }, + { + "raw": "the vm of the root volume[%s] is not available. check if the vm exists.", + "en_US": "the vm of the root volume[{0}] is not available. check if the vm exists.", + "zh_CN": "虚拟机的根盘 {0} 不存在,请检查虚拟机是否存在", + "arguments": [ + "msg.getTargetResourceUuid()" + ], + "fileName": "src/main/java/org/zstack/scheduler/VolumeSnapshotGroupJobFactory.java" + }, { "raw": "snapshotGroupMaxNumber : %s format error because %s", "en_US": "snapshotGroupMaxNumber : {0} format error because {1}", @@ -4573,6 +5127,15 @@ ], "fileName": "src/main/java/org/zstack/scheduler/VolumeSnapshotGroupJobFactory.java" }, + { + "raw": "the volume[%s] is not available. check if the volume exists.", + "en_US": "the volume[{0}] is not available. 
check if the volume exists.", + "zh_CN": "盘[{0}]不存在,请检查卷是否存在", + "arguments": [ + "msg.getTargetResourceUuid()" + ], + "fileName": "src/main/java/org/zstack/scheduler/VolumeSnapshotJobFactory.java" + }, { "raw": "the volume[%s] does not support snapshots retention", "en_US": "the volume[{0}] does not support snapshots retention", @@ -4592,6 +5155,15 @@ ], "fileName": "src/main/java/org/zstack/scheduler/VolumeSnapshotJobFactory.java" }, + { + "raw": "the vm of the root volume[%s] state in Destroyed. job state change is not allowed", + "en_US": "the vm of the root volume[{0}] state in Destroyed. job state change is not allowed", + "zh_CN": "虚拟机的根盘 {0} 状态为Destroyed,不允许更改状态", + "arguments": [ + "getTargetResourceUuid()" + ], + "fileName": "src/main/java/org/zstack/scheduler/snapshot/CreateVolumeSnapshotGroupJob.java" + }, { "raw": "volume[uuid:%s] is deleted, state change is not allowed", "en_US": "volume[uuid:{0}] is deleted, state change is not allowed", @@ -4774,6 +5346,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/backup/imagestore/ImageStoreBackupStorage.java" }, + { + "raw": "the uuid of imagestoreBackupStorage agent changed[expected:%s, actual:%s], it\u0027s most likely the agent was manually restarted. Issue a reconnect to sync the status", + "en_US": "the uuid of imagestoreBackupStorage agent changed[expected:{0}, actual:{1}], it\u0027s most likely the agent was manually restarted. 
Issue a reconnect to sync the status", + "zh_CN": "ImageStoreBackupStorage的代理的uuid已改变[期望:{0},实际:{1}],这可能是代理被手动重启了。请重新连接以同步状态", + "arguments": [ + "self.getUuid()", + "ret.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/backup/imagestore/ImageStoreBackupStorage.java" + }, { "raw": "get image hash failed, because:%s", "en_US": "get image hash failed, because:{0}", @@ -4882,6 +5464,15 @@ ], "fileName": "src/main/java/org/zstack/storage/backup/imagestore/ImageStoreBackupStorageApiInterceptor.java" }, + { + "raw": "the password for the physical machine [%s] is empty. please set a password", + "en_US": "the password for the physical machine [{0}] is empty. please set a password", + "zh_CN": "主机[%s]的密码为空。请设置密码", + "arguments": [ + "msg.getHostname()" + ], + "fileName": "src/main/java/org/zstack/storage/backup/imagestore/ImageStoreBackupStorageApiInterceptor.java" + }, { "raw": "cannot find a connected host in cluster to which PS [uuid: %s] attached", "en_US": "cannot find a connected host in cluster to which PS [uuid: {0}] attached", @@ -5224,6 +5815,26 @@ ], "fileName": "src/main/java/org/zstack/storage/migration/StorageMigrationApiInterceptor.java" }, + { + "raw": "can not migrate volume[%s], because volume state is Disabled", + "en_US": "can not migrate volume[{0}], because volume state is Disabled", + "zh_CN": "不能迁移硬盘[uuid:{0}],硬盘状态为禁用", + "arguments": [ + "msg.getVolumeUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/migration/StorageMigrationApiInterceptor.java" + }, + { + "raw": "there are not enough capacity for vm[uuid: %s] storage migration, required capacity(include image cache): %s, current available physical capacity: %s", + "en_US": "there are not enough capacity for vm[uuid: {0}] storage migration, required capacity(include image cache): {1}, current available physical capacity: {2}", + "zh_CN": "不能为虚拟机[uuid:{0}]进行存储迁移,所需容量(包括镜像缓存):{1},当前可用物理容量:{2}", + "arguments": [ + "msg.getVmInstanceUuid()", + "size", + 
"dstPrimaryStorageVO.getCapacity().getAvailablePhysicalCapacity()" + ], + "fileName": "src/main/java/org/zstack/storage/migration/StorageMigrationBase.java" + }, { "raw": "not support vm state[%s] to do storage migration", "en_US": "not support vm state[{0}] to do storage migration", @@ -5339,6 +5950,18 @@ ], "fileName": "src/main/java/org/zstack/storage/migration/primary/ceph/CephToCephMigrateVolumeFlow.java" }, + { + "raw": "found trashId(%s) in primaryStorage [%s] for the migrate installPath[%s]. please clean it first by \u0027APICleanUpTrashOnPrimaryStorageMsg\u0027 if you insist to migrate the volume[%s]", + "en_US": "found trashId({0}) in primaryStorage [{1}] for the migrate installPath[{2}]. please clean it first by \u0027APICleanUpTrashOnPrimaryStorageMsg\u0027 if you insist to migrate the volume[{3}]", + "zh_CN": "在主存储[{1}]的回收数据({0})中己存在要迁移的目标路径[{2}],如果要继续迁移卷[{3}],请先调用\u0027APICleanUpTrashOnPrimaryStorageMsg\u0027来手动清理该回收数据", + "arguments": [ + "re.getTrashId()", + "srcPsUuid", + "srcVolumeFolderPath", + "re.getResourceUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/migration/primary/local/LocalToLocalMigrateVolumeFlow.java" + }, { "raw": "cannot find any connected host to perform the storage migration operation", "en_US": "cannot find any connected host to perform the storage migration operation", @@ -5370,6 +5993,17 @@ ], "fileName": "src/main/java/org/zstack/storage/migration/primary/nfs/NfsToNfsMigrateVolumeFlow.java" }, + { + "raw": "volume[uuid:%s] has image[uuid:%s] dependency, other dependency image[%s]", + "en_US": "volume[uuid:{0}] has image[uuid:{1}] dependency, other dependency image[{2}]", + "zh_CN": "盘[uuid:{0}]有镜像[uuid:{1}]依赖,其他依赖镜像有[{2}]", + "arguments": [ + "volumeUuid", + "r.getImageUuid()", + "r.getOtherImageUuids()" + ], + "fileName": "src/main/java/org/zstack/storage/migration/primary/nfs/NfsToNfsMigrateVolumeFlow.java" + }, { "raw": "image [uuid:%s] has been deleted", "en_US": "image [uuid:{0}] has been deleted", @@ 
-5415,6 +6049,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/imagestore/ceph/CephPrimaryToImageStoreBackupStorageMediatorImpl.java" }, + { + "raw": "The source vm has local data volume on host[uuid: %s],but in fast clone api msg try to clone vm to host[%s], which is impossible for fast clone feature.", + "en_US": "The source vm has local data volume on host[uuid: {0}],but in fast clone api msg try to clone vm to host[{1}], which is impossible for fast clone feature.", + "zh_CN": "源虚拟机有本地硬盘,但是该虚拟机在快照克隆API消息中尝试将虚拟机克隆到主机[{0}]失败: 快照克隆功能不支持该操作", + "arguments": [ + "hostUuid", + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/imagestore/local/LocalStorageImageStoreApiInterceptor.java" + }, { "raw": "System can\u0027t find imagestore backup Storage. Please do not set imagestore backup Storage server IP to localhost(127.*.*.*),", "en_US": "System can\u0027t find imagestore backup Storage. Please do not set imagestore backup Storage server IP to localhost(127.*.*.*),", @@ -5519,6 +6163,40 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/license/XskyLicenseInfoFactory.java" }, + { + "raw": "the current primaryStorage %s does not have a third-party token set, and the block volume cannot be created temporarily", + "en_US": "the current primaryStorage {0} does not have a third-party token set, and the block volume cannot be created temporarily", + "zh_CN": "当前PrimaryStorage [{0}] 没有第三方令牌设置,暂时无法创建块设备", + "arguments": [ + "msg.getPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" + }, + { + "raw": "the current primaryStorage %s is not Ceph type, can not get access path", + "en_US": "the current primaryStorage {0} is not Ceph type, can not get access path", + "zh_CN": "当前PrimaryStorage [{0}] 不是Ceph类型,无法获取访问路径", + "arguments": [ + "primaryStorageUuid" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" + }, + { + "raw": 
"Ceph type block volume accessPathId, accessPathIqn cannot be null", + "en_US": "Ceph type block volume accessPathId, accessPathIqn cannot be null", + "zh_CN": "Ceph 类型块设备访问路径ID,访问路径 IQN 不能为空", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" + }, + { + "raw": "current primary storage type not support block volume, supporttype has %s", + "en_US": "current primary storage type not support block volume, supporttype has {0}", + "zh_CN": "当前PrimaryStorage类型不支持块设备,支持类型有 {0}", + "arguments": [ + "ALLOW_BLOCK_VENDER_TYPES" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" + }, { "raw": "no block volume factory found for vendor: %s", "en_US": "no block volume factory found for vendor: {0}", @@ -5528,6 +6206,15 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" }, + { + "raw": "name[%s] is invalid, the name requirement: 1~128 characters, support uppercase and lowercase letters, numbers, underscores, and hyphens; It can only start with uppercase and lowercase letters; It does not start or end with a space ", + "en_US": "name[{0}] is invalid, the name requirement: 1~128 characters, support uppercase and lowercase letters, numbers, underscores, and hyphens; It can only start with uppercase and lowercase letters; It does not start or end with a space ", + "zh_CN": "名称[{0}]无效,名称要求:1~128个字符,支持大写和小写字母、数字、下划线、连字符,只能以大写或小写字母开头,不能以空格开头或结尾", + "arguments": [ + "name" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" + }, { "raw": "iothread need qemu version \u003e\u003d %s, but %s on host[%s].", "en_US": "iothread need qemu version \u003e\u003d {0}, but {1} on host[{2}].", @@ -5570,6 +6257,26 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" }, + { + "raw": "snapshot validation is unsupported for volume[uuid: %s]. 
Volume should be attached to vm", + "en_US": "snapshot validation is unsupported for volume[uuid: {0}]. Volume should be attached to vm", + "zh_CN": "快照验证不支持盘[uuid:{0}]。硬盘应添加到虚拟机上", + "arguments": [ + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" + }, + { + "raw": "snapshot validation is unsupported for volume[uuid: %s]. Attached vm is not in state of [%s, %s]", + "en_US": "snapshot validation is unsupported for volume[uuid: {0}]. Attached vm is not in state of [{1}, {2}]", + "zh_CN": "快照验证不支持硬盘[uuid:{0}]。相关的虚拟机不在状态[{1}, {2}]中", + "arguments": [ + "msg.getUuid()", + "VmInstanceState.Running", + "VmInstanceState.Paused" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" + }, { "raw": "volume[uuid:%s] can not found", "en_US": "volume[uuid:{0}] can not found", @@ -5619,6 +6326,15 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" }, + { + "raw": "can not resize volume[%s], because volume state is Disabled", + "en_US": "can not resize volume[{0}], because volume state is Disabled", + "zh_CN": "不能调整硬盘[uuid:{0}]的大小,因为硬盘状态为禁用", + "arguments": [ + "msg.getVolumeUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeMevocoApiInterceptor.java" + }, { "raw": "At least one of vmInstanceUuid or uuid should be set", "en_US": "At least one of vmInstanceUuid or uuid should be set", @@ -5737,6 +6453,35 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/block/XskyPrimaryStorageBackendFactory.java" }, + { + "raw": "name: [%s] already exists, block volume name cannot be duplicated on type[%s] primarystorage", + "en_US": "name: [{0}] already exists, block volume name cannot be duplicated on type[{1}] primarystorage", + "zh_CN": "名称[uuid:{0}]已存在,块硬盘名称不能在类型[uuid:{1}]主存储上重复", + "arguments": [ + "volumeName", + "vo.getType()" + ], + "fileName": 
"src/main/java/org/zstack/storage/volume/block/expon/ExponBlockVolumeFactory.java" + }, + { + "raw": "[protocol] parameter is null, type[%s] primarystorage must set block volume protocol", + "en_US": "[protocol] parameter is null, type[{0}] primarystorage must set block volume protocol", + "zh_CN": "参数为空,类型[{0}]的主存储必须设置块硬盘协议", + "arguments": [ + "vo.getType()" + ], + "fileName": "src/main/java/org/zstack/storage/volume/block/expon/ExponBlockVolumeFactory.java" + }, + { + "raw": "current [%s] primary storage not support [%s] type protocol, please add protocol to storage first", + "en_US": "current [{0}] primary storage not support [{1}] type protocol, please add protocol to storage first", + "zh_CN": "当前[{0}]主存储不支持[{1}]类型协议,请先添加协议", + "arguments": [ + "vo.getUuid()", + "protocol" + ], + "fileName": "src/main/java/org/zstack/storage/volume/block/expon/ExponBlockVolumeFactory.java" + }, { "raw": "ExponBlockVolume[uuid:%s] not found", "en_US": "ExponBlockVolume[uuid:{0}] not found", @@ -6004,6 +6749,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/usbDevice/UsbDeviceManager.java" }, + { + "raw": "cannot attach the usb device[uuid:%s] to vm[uuid:%s], possibly reasons include: the device is not enabled or had been attached to a vm, or the device and the vm are not on same host.", + "en_US": "cannot attach the usb device[uuid:{0}] to vm[uuid:{1}], possibly reasons include: the device is not enabled or had been attached to a vm, or the device and the vm are not on same host.", + "zh_CN": "不能将USB设备[uuid:{0}]绑定到虚拟机[uuid:{1}],可能原因包括:设备未启用或已绑定到虚拟机,或设备和虚拟机不在同一主机上。", + "arguments": [ + "msg.getUsbDeviceUuid()", + "msg.getVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/usbDevice/UsbDeviceManager.java" + }, { "raw": "usb is already bound to vm[uuid:%s] and cannot be bound to other vm", "en_US": "usb is already bound to vm[uuid:{0}] and cannot be bound to other vm", @@ -6231,6 +6986,16 @@ ], "fileName": 
"src/main/java/org/zstack/vmware/ESXHostFactory.java" }, + { + "raw": "console password is not supported by vm[uuid:%s] on ESXHost[ESXI version:%s]", + "en_US": "console password is not supported by vm[uuid:{0}] on ESXHost[ESXI version:{1}]", + "zh_CN": "虚拟机[uuid:{0}]在ESX主机[ESXI版本:{1}]上不支持控制台密码", + "arguments": [ + "msg.getUuid()", + "esxVersion" + ], + "fileName": "src/main/java/org/zstack/vmware/VCenterApiInterceptor.java" + }, { "raw": "vCenter login name expected.", "en_US": "vCenter login name expected.", @@ -6457,6 +7222,45 @@ ], "fileName": "src/main/java/org/zstack/vmware/VCenterManagerImpl.java" }, + { + "raw": "Login to vCenter [%s] failed with user [%s],please check your network connection and credential.", + "en_US": "Login to vCenter [{0}] failed with user [{1}],please check your network connection and credential.", + "zh_CN": "登录到VCenter[{0}]失败,用户名[{1}],请检查网络连接和密码。", + "arguments": [ + "msg.getDomainName()", + "msg.getUsername()" + ], + "fileName": "src/main/java/org/zstack/vmware/VCenterManagerImpl.java" + }, + { + "raw": "Parse response failed from vCenter [%s],please check the port number[%d].", + "en_US": "Parse response failed from vCenter [{0}],please check the port number[{1}].", + "zh_CN": "从 vCenter {0} 解析响应失败,请检查端口号[{1}]", + "arguments": [ + "msg.getDomainName()", + "msg.getPort() \u003d\u003d null ? 443 : msg.getPort()" + ], + "fileName": "src/main/java/org/zstack/vmware/VCenterManagerImpl.java" + }, + { + "raw": "SSL handshake failed with vCenter [%s],because insecure TLS 1.0 is used. Manually enabled TLS 1.0 in jdk configuration if needed.", + "en_US": "SSL handshake failed with vCenter [{0}],because insecure TLS 1.0 is used. 
Manually enabled TLS 1.0 in jdk configuration if needed.", + "zh_CN": "和 vCenter {0} 的 SSL 握手失败,请检查端口号[{1}],是否使用不安全的TLS 1.0。如果需要,请手动启用jdk中的TLS 1.0。", + "arguments": [ + "msg.getDomainName()" + ], + "fileName": "src/main/java/org/zstack/vmware/VCenterManagerImpl.java" + }, + { + "raw": "SSL handshake failed with vCenter [%s],please check the port number[%d].", + "en_US": "SSL handshake failed with vCenter [{0}],please check the port number[{1}].", + "zh_CN": "和 vCenter {0} 的 SSL 握手失败,请检查端口号[{1}]。", + "arguments": [ + "msg.getDomainName()", + "msg.getPort() \u003d\u003d null ? 443 : msg.getPort()" + ], + "fileName": "src/main/java/org/zstack/vmware/VCenterManagerImpl.java" + }, { "raw": "No clustered compute resource found", "en_US": "No clustered compute resource found", @@ -6575,6 +7379,16 @@ ], "fileName": "src/main/java/org/zstack/vmware/VCenterServiceInstanceManagerImpl.java" }, + { + "raw": "VCenter[uuid\u003d%s] is Disabled. You can only perform read-only operations on this VCenter. If you want to make configuration changes to it, you need to update config by UpdateVCenterAction {uuid\u003d%s state\u003dEnabled}", + "en_US": "VCenter[uuid\u003d{0}] is Disabled. You can only perform read-only operations on this VCenter. If you want to make configuration changes to it, you need to update config by UpdateVCenterAction {uuid\u003d{1} state\u003dEnabled}", + "zh_CN": "vCenter[{0}]已禁用,只能对它执行只读操作。如果您想对它进行配置更改,请使用UpdateVCenterAction {uuid\u003d{1} state\u003dEnabled}进行更新。", + "arguments": [ + "vcenterUuid", + "vcenterUuid" + ], + "fileName": "src/main/java/org/zstack/vmware/VCenterStateApiInterceptor.java" + }, { "raw": "VCenter[uuid\u003d%s] are Disabled. You can only perform read-only operations on these VCenter.", "en_US": "VCenter[uuid\u003d{0}] are Disabled. 
You can only perform read-only operations on these VCenter.", diff --git a/conf/i18n_json/i18n_ministorage.json b/conf/i18n_json/i18n_ministorage.json index 5035b4ae4bc..ada918e7754 100644 --- a/conf/i18n_json/i18n_ministorage.json +++ b/conf/i18n_json/i18n_ministorage.json @@ -15,6 +15,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageApiInterceptor.java" }, + { + "raw": "volume[uuid:%s] has been attached some VM(s)[uuid:%s] which are not Stopped and not running on the specific host.", + "en_US": "volume[uuid:{0}] has been attached some VM(s)[uuid:{1}] which are not Stopped and not running on the specific host.", + "zh_CN": "", + "arguments": [ + "volume.getUuid()", + "runningVmUuids.toString()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageApiInterceptor.java" + }, { "raw": "VM[uuid:%s] are not Stopped and not running on the specific host.", "en_US": "VM[uuid:{0}] are not Stopped and not running on the specific host.", @@ -53,6 +63,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageBase.java" }, + { + "raw": "the mini storage[uuid:%s, name:%s] cannot find any available host in attached clusters for instantiating the volume", + "en_US": "the mini storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for instantiating the volume", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageBase.java" + }, { "raw": "can not determine which host", "en_US": "can not determine which host", @@ -60,6 +80,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageBase.java" }, + { + "raw": "the primary storage[uuid:%s, name:%s] has not attached to any clusters, or no hosts in the attached clusters are connected", + "en_US": "the primary storage[uuid:{0}, name:{1}] has not attached to any 
clusters, or no hosts in the attached clusters are connected", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageBase.java" + }, { "raw": "no connected host found, mini storage failed", "en_US": "no connected host found, mini storage failed", @@ -84,6 +114,18 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageBase.java" }, + { + "raw": "host[uuid: %s] of mini primary storage[uuid: %s] doesn\u0027t have enough capacity[current: %s bytes, needed: %s]", + "en_US": "host[uuid: {0}] of mini primary storage[uuid: {1}] doesn\u0027t have enough capacity[current: {2} bytes, needed: {3}]", + "zh_CN": "", + "arguments": [ + "hostUuid", + "self.getUuid()", + "ref.getAvailableCapacity()", + "size" + ], + "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageBase.java" + }, { "raw": "the host[uuid:%s] is not connected", "en_US": "the host[uuid:{0}] is not connected", @@ -168,6 +210,34 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageKvmCommandSender.java" }, + { + "raw": "can not find any available host to resize volume[uuid: %s] on mini storage[uuid: %s]", + "en_US": "can not find any available host to resize volume[uuid: {0}] on mini storage[uuid: {1}]", + "zh_CN": "", + "arguments": [ + "volume.getUuid()", + "volume.getPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageKvmFactory.java" + }, + { + "raw": "volume[uuid:%s] replication is syncing data, please wait until it is finished.", + "en_US": "volume[uuid:{0}] replication is syncing data, please wait until it is finished.", + "zh_CN": "", + "arguments": [ + "volumeUuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageKvmFactory.java" + }, + { + "raw": "replication network status of volume[uuid:%s] run into StandAlone, but host are all 
Connected, please recover it first.", + "en_US": "replication network status of volume[uuid:{0}] run into StandAlone, but host are all Connected, please recover it first.", + "zh_CN": "", + "arguments": [ + "volumeUuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/ministorage/MiniStorageKvmFactory.java" + }, { "raw": "Invalid path string %s", "en_US": "Invalid path string {0}", diff --git a/conf/i18n_json/i18n_network.json b/conf/i18n_json/i18n_network.json index 18c2d4fed30..886b28ec8ce 100644 --- a/conf/i18n_json/i18n_network.json +++ b/conf/i18n_json/i18n_network.json @@ -9,6 +9,16 @@ ], "fileName": "src/main/java/org/zstack/network/l2/L2NetworkApiInterceptor.java" }, + { + "raw": "could not attach l2 network, because there is another network [uuid:%s] on physical interface [%s] with different vswitch type", + "en_US": "could not attach l2 network, because there is another network [uuid:{0}] on physical interface [{1}] with different vswitch type", + "zh_CN": "", + "arguments": [ + "otherL2s.get(0)", + "l2.getPhysicalInterface()" + ], + "fileName": "src/main/java/org/zstack/network/l2/L2NetworkApiInterceptor.java" + }, { "raw": "l2Network[uuid:%s] has not attached to cluster[uuid:%s]", "en_US": "l2Network[uuid:{0}] has not attached to cluster[uuid:{1}]", @@ -28,6 +38,16 @@ ], "fileName": "src/main/java/org/zstack/network/l2/L2NetworkApiInterceptor.java" }, + { + "raw": "could not attach l2Network[uuid:%s] to host[uuid:%s] which is in the premaintenance or maintenance state", + "en_US": "could not attach l2Network[uuid:{0}] to host[uuid:{1}] which is in the premaintenance or maintenance state", + "zh_CN": "", + "arguments": [ + "msg.getL2NetworkUuid()", + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/L2NetworkApiInterceptor.java" + }, { "raw": "invalid json format, causes: %s", "en_US": "invalid json format, causes: {0}", @@ -84,6 +104,15 @@ ], "fileName": 
"src/main/java/org/zstack/network/l2/L2NetworkApiInterceptor.java" }, + { + "raw": "cannot update virtual network id for l2Network[uuid:%s] because it only supports an L2Network that is exclusively attached to a kvm cluster", + "en_US": "cannot update virtual network id for l2Network[uuid:{0}] because it only supports an L2Network that is exclusively attached to a kvm cluster", + "zh_CN": "", + "arguments": [ + "l2.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/L2NetworkApiInterceptor.java" + }, { "raw": "there\u0027s no host in cluster[uuid: %s], but hostParams is set", "en_US": "there\u0027s no host in cluster[uuid: {0}], but hostParams is set", @@ -222,6 +251,106 @@ ], "fileName": "src/main/java/org/zstack/network/l3/L3BasicNetwork.java" }, + { + "raw": "could not delete ip address, because it\u0027s used by vmnic[uuid:%s]", + "en_US": "could not delete ip address, because it\u0027s used by vmnic[uuid:{0}]", + "zh_CN": "", + "arguments": [ + "vo.getVmNicUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because start ip[%s] is not valid ip address", + "en_US": "could not reserve ip range, because start ip[{0}] is not valid ip address", + "zh_CN": "", + "arguments": [ + "msg.getStartIp()" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because end ip[%s] is not valid ip address", + "en_US": "could not reserve ip range, because end ip[{0}] is not valid ip address", + "zh_CN": "", + "arguments": [ + "msg.getEndIp()" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because end ip[%s] is not ipv4 address", + "en_US": "could not reserve ip range, because end ip[{0}] is not ipv4 address", + "zh_CN": "", + "arguments": [ + "msg.getEndIp()" + ], + "fileName": 
"src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because end ip[%s] is not ipv6 address", + "en_US": "could not reserve ip range, because end ip[{0}] is not ipv6 address", + "zh_CN": "", + "arguments": [ + "msg.getEndIp()" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because end ip[%s] is less than start ip[%s]", + "en_US": "could not reserve ip range, because end ip[{0}] is less than start ip[{1}]", + "zh_CN": "", + "arguments": [ + "msg.getEndIp()", + "msg.getStartIp()" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because there is no ipv4 range", + "en_US": "could not reserve ip range, because there is no ipv4 range", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because there is no ipv6 range", + "en_US": "could not reserve ip range, because there is no ipv6 range", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because reserve ip is not in ip range[%s]", + "en_US": "could not reserve ip range, because reserve ip is not in ip range[{0}]", + "zh_CN": "", + "arguments": [ + "ipv6Ranges.get(0).getNetworkCidr()" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not reserve ip range, because new range [%s:%s] is overlapped with old range [%s:%s]", + "en_US": "could not reserve ip range, because new range [{0}:{1}] is overlapped with old range [{2}:{3}]", + "zh_CN": "", + "arguments": [ + "msg.getStartIp()", + "msg.getEndIp()", + "reserveRange.getStartIp()", + "reserveRange.getEndIp()" + ], + "fileName": 
"src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, + { + "raw": "could not set mtu because l2 network[uuid:%s] of l3 network [uuid:%s] mtu can not be bigger than the novlan network", + "en_US": "could not set mtu because l2 network[uuid:{0}] of l3 network [uuid:{1}] mtu can not be bigger than the novlan network", + "zh_CN": "", + "arguments": [ + "l2VO.getUuid()", + "msg.getL3NetworkUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, { "raw": "can not delete the last normal ip range because there is still has address pool", "en_US": "can not delete the last normal ip range because there is still has address pool", @@ -462,6 +591,15 @@ ], "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" }, + { + "raw": "not valid combination of system and category,only %s are valid", + "en_US": "not valid combination of system and category,only {0} are valid", + "zh_CN": "", + "arguments": [ + "L3NetworkCategory.validCombination" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, { "raw": "l3 network [uuid %s: name %s] is not a public network, address pool range can not be added", "en_US": "l3 network [uuid {0}: name {1}] is not a public network, address pool range can not be added", @@ -577,6 +715,17 @@ ], "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" }, + { + "raw": "multiple CIDR on the same L3 network is not allowed. There has been a IP range[uuid:%s, CIDR:%s], the new IP range[CIDR:%s] is not in the CIDR with the existing one", + "en_US": "multiple CIDR on the same L3 network is not allowed. 
There has been a IP range[uuid:{0}, CIDR:{1}], the new IP range[CIDR:{2}] is not in the CIDR with the existing one", + "zh_CN": "", + "arguments": [ + "r.getUuid()", + "rcidr", + "cidr" + ], + "fileName": "src/main/java/org/zstack/network/l3/L3NetworkApiInterceptor.java" + }, { "raw": "the endip[%s] is not in the subnet %s/%s", "en_US": "the endip[{0}] is not in the subnet {1}/{2}", diff --git a/conf/i18n_json/i18n_nfsPrimaryStorage.json b/conf/i18n_json/i18n_nfsPrimaryStorage.json index ae80bd0c714..d2d3ad48bfc 100644 --- a/conf/i18n_json/i18n_nfsPrimaryStorage.json +++ b/conf/i18n_json/i18n_nfsPrimaryStorage.json @@ -42,6 +42,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsApiParamChecker.java" }, + { + "raw": "there are %s running VMs on the NFS primary storage, please stop them and try again:\\n%s\\n", + "en_US": "there are {0} running VMs on the NFS primary storage, please stop them and try again:\\n{1}\\n", + "zh_CN": "", + "arguments": [ + "vms.size()", + "StringUtils.join(vms, \"\\n\")" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsApiParamChecker.java" + }, { "raw": "cannot find usable backend", "en_US": "cannot find usable backend", @@ -56,6 +66,31 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" }, + { + "raw": "no host in Connected status to which nfs primary storage[uuid:%s, name:%s] attached found to revert volume[uuid:%s] to snapshot[uuid:%s, name:%s]", + "en_US": "no host in Connected status to which nfs primary storage[uuid:{0}, name:{1}] attached found to revert volume[uuid:{2}] to snapshot[uuid:{3}, name:{4}]", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()", + "msg.getVolume().getUuid()", + "msg.getSnapshot().getUuid()", + "msg.getSnapshot().getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" + }, + { + "raw": "no host in Connected status to which nfs primary storage[uuid:%s, 
name:%s] attached found to revert volume[uuid:%s] to image[uuid:%s]", + "en_US": "no host in Connected status to which nfs primary storage[uuid:{0}, name:{1}] attached found to revert volume[uuid:{2}] to image[uuid:{3}]", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()", + "msg.getVolume().getUuid()", + "msg.getVolume().getRootImageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" + }, { "raw": "vm[uuid:%s] is not Running, Paused or Stopped, current state is %s", "en_US": "vm[uuid:{0}] is not Running, Paused or Stopped, current state is {1}", @@ -75,6 +110,37 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" }, + { + "raw": "the NFS primary storage[uuid:%s, name:%s] cannot find any usable host to create the data volume[uuid:%s, name:%s]", + "en_US": "the NFS primary storage[uuid:{0}, name:{1}] cannot find any usable host to create the data volume[uuid:{2}, name:{3}]", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()", + "msg.getVolume().getUuid()", + "msg.getVolume().getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" + }, + { + "raw": "the NFS primary storage[uuid:%s, name:%s] has not attached to any clusters, or no hosts in the attached clusters are connected", + "en_US": "the NFS primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" + }, + { + "raw": "cannot find available host for operation on primary storage[uuid:%s].", + "en_US": "cannot find available host for operation on primary storage[uuid:{0}].", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" + }, { "raw": "host 
where vm[uuid:%s] locate is not Connected.", "en_US": "host where vm[uuid:{0}] locate is not Connected.", @@ -84,6 +150,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" }, + { + "raw": "volume[uuid:%s] has reference volume[%s], can not change volume type before flatten them and their descendants", + "en_US": "volume[uuid:{0}] has reference volume[{1}], can not change volume type before flatten them and their descendants", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "infos.toString()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" + }, { "raw": "the NFS primary storage[uuid:%s, name:%s] cannot find hosts in attached clusters to perform the operation", "en_US": "the NFS primary storage[uuid:{0}, name:{1}] cannot find hosts in attached clusters to perform the operation", @@ -103,6 +179,17 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java" }, + { + "raw": "the NFS primary storage[uuid:%s] is not attached to any clusters, and cannot expunge the root volume[uuid:%s] of the VM[uuid:%s]", + "en_US": "the NFS primary storage[uuid:{0}] is not attached to any clusters, and cannot expunge the root volume[uuid:{1}] of the VM[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "psUuid", + "vmUuid", + "volumeUuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageFactory.java" + }, { "raw": "cannot find a connected host in cluster which ps [uuid: %s] attached", "en_US": "cannot find a connected host in cluster which ps [uuid: {0}] attached", @@ -121,6 +208,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageFactory.java" }, + { + "raw": "cannot find a host which has Connected host-NFS connection to execute command for nfs primary storage[uuid:%s]", + "en_US": "cannot find a host which has Connected host-NFS connection to execute command for nfs primary storage[uuid:{0}]", + "zh_CN": "", + 
"arguments": [ + "pri.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageFactory.java" + }, { "raw": "cannot find proper hypervisorType for primary storage[uuid:%s] to handle image format or volume format[%s]", "en_US": "cannot find proper hypervisorType for primary storage[uuid:{0}] to handle image format or volume format[{1}]", @@ -131,6 +227,21 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageFactory.java" }, + { + "raw": "unable to attach a primary storage[uuid:%s, name:%s] to cluster[uuid:%s]. Kvm host in the cluster has qemu-img with version[%s]; but the primary storage has attached to another cluster that has kvm host which has qemu-img with version[%s]. qemu-img version greater than %s is incompatible with versions less than %s, this will causes volume snapshot operation to fail. Please avoid attaching a primary storage to clusters that have different Linux distributions, in order to prevent qemu-img version mismatch", + "en_US": "unable to attach a primary storage[uuid:{0}, name:{1}] to cluster[uuid:{2}]. Kvm host in the cluster has qemu-img with version[{3}]; but the primary storage has attached to another cluster that has kvm host which has qemu-img with version[{4}]. qemu-img version greater than {5} is incompatible with versions less than {6}, this will causes volume snapshot operation to fail. 
Please avoid attaching a primary storage to clusters that have different Linux distributions, in order to prevent qemu-img version mismatch", + "zh_CN": "", + "arguments": [ + "inv.getUuid()", + "inv.getName()", + "clusterUuid", + "versionInCluster", + "otherVersion", + "QCOW3_QEMU_IMG_VERSION", + "QCOW3_QEMU_IMG_VERSION" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java" + }, { "raw": "unable to create folder[installUrl:%s] on kvm host[uuid:%s, ip:%s], because %s", "en_US": "unable to create folder[installUrl:{0}] on kvm host[uuid:{1}, ip:{2}], because {3}", @@ -152,6 +263,17 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java" }, + { + "raw": "failed to ping nfs primary storage[uuid:%s] from host[uuid:%s],because %s. disconnect this host-ps connection", + "en_US": "failed to ping nfs primary storage[uuid:{0}] from host[uuid:{1}],because {2}. disconnect this host-ps connection", + "zh_CN": "", + "arguments": [ + "psInv.getUuid()", + "huuid", + "reply.isSuccess() ? rsp.getError() : reply.getError()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java" + }, { "raw": "The chosen host[uuid:%s] to perform storage migration is lost", "en_US": "The chosen host[uuid:{0}] to perform storage migration is lost", @@ -181,6 +303,21 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java" }, + { + "raw": "unable to attach a primary storage to cluster. Kvm host[uuid:%s, name:%s] in cluster has qemu-img with version[%s]; but the primary storage has attached to a cluster that has kvm host[uuid:%s], which has qemu-img with version[%s]. qemu-img version greater than %s is incompatible with versions less than %s, this will causes volume snapshot operation to fail. 
Please avoid attaching a primary storage to clusters that have different Linux distributions, in order to prevent qemu-img version mismatch", + "en_US": "unable to attach a primary storage to cluster. Kvm host[uuid:{0}, name:{1}] in cluster has qemu-img with version[{2}]; but the primary storage has attached to a cluster that has kvm host[uuid:{3}], which has qemu-img with version[{4}]. qemu-img version greater than {5} is incompatible with versions less than {6}, this will causes volume snapshot operation to fail. Please avoid attaching a primary storage to clusters that have different Linux distributions, in order to prevent qemu-img version mismatch", + "zh_CN": "", + "arguments": [ + "context.getInventory().getUuid()", + "context.getInventory().getName()", + "mine", + "e.getKey()", + "version", + "QCOW3_QEMU_IMG_VERSION", + "QCOW3_QEMU_IMG_VERSION" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java" + }, { "raw": "unable to create empty volume[uuid:%s, name:%s] on kvm host[uuid:%s, ip:%s], because %s", "en_US": "unable to create empty volume[uuid:{0}, name:{1}] on kvm host[uuid:{2}, ip:{3}], because {4}", @@ -194,6 +331,17 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java" }, + { + "raw": "failed to delete bits[%s] on nfs primary storage[uuid:%s], %s, will clean up installPath, pinv.getUuid(), rsp.getError()", + "en_US": "failed to delete bits[{0}] on nfs primary storage[uuid:{1}], {2}, will clean up installPath, pinv.getUuid(), rsp.getError()", + "zh_CN": "", + "arguments": [ + "installPath", + "pinv.getUuid()", + "rsp.getError()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java" + }, { "raw": "failed to revert volume[uuid:%s] to snapshot[uuid:%s] on kvm host[uuid:%s, ip:%s], %s", "en_US": "failed to revert volume[uuid:{0}] to snapshot[uuid:{1}] on kvm host[uuid:{2}, ip:{3}], {4}", diff --git 
a/conf/i18n_json/i18n_ovf.json b/conf/i18n_json/i18n_ovf.json index 548f6717224..6399f03faec 100644 --- a/conf/i18n_json/i18n_ovf.json +++ b/conf/i18n_json/i18n_ovf.json @@ -132,7 +132,7 @@ { "raw": "invalid image info[fileName\u003d%s]", "en_US": "invalid image info[fileName\u003d{0}]", - "zh_CN": "非法镜像信息 [fileName={0}]", + "zh_CN": "非法镜像信息 [fileName\u003d{0}]", "arguments": [ "param.getFileName()" ], @@ -207,6 +207,17 @@ "arguments": [], "fileName": "src/main/java/org/zstack/ovf/OvfInterceptor.java" }, + { + "raw": "backup storage[uuid: %s] does not have enough available capacity for exporting vm[uuid: %s], required capacity is: %d", + "en_US": "backup storage[uuid: {0}] does not have enough available capacity for exporting vm[uuid: {1}], required capacity is: {2}", + "zh_CN": "", + "arguments": [ + "msg.getBackupStorageUuid()", + "msg.getVmUuid()", + "totalSize" + ], + "fileName": "src/main/java/org/zstack/ovf/OvfManagerImpl.java" + }, { "raw": "failed to parse OVF XML string", "en_US": "failed to parse OVF XML string", @@ -283,6 +294,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/ovf/datatype/CreateVmFromOvfBundle.java" }, + { + "raw": "failed to create ovf bundle: Neither the OVF file nor the custom API has set the size of the root disk, so unable to allocate root disk. You should set root disk size in CreateVmInstanceFromOvfAction.jsonCreateVmParam.rootDiskSize", + "en_US": "failed to create ovf bundle: Neither the OVF file nor the custom API has set the size of the root disk, so unable to allocate root disk. 
You should set root disk size in CreateVmInstanceFromOvfAction.jsonCreateVmParam.rootDiskSize", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/ovf/datatype/CreateVmFromOvfBundle.java" + }, { "raw": "failed to create ovf bundle", "en_US": "failed to create ovf bundle", diff --git a/conf/i18n_json/i18n_portForwarding.json b/conf/i18n_json/i18n_portForwarding.json index eae09815d41..5d095fabf7b 100644 --- a/conf/i18n_json/i18n_portForwarding.json +++ b/conf/i18n_json/i18n_portForwarding.json @@ -153,6 +153,30 @@ ], "fileName": "src/main/java/org/zstack/network/service/portforwarding/PortForwardingApiInterceptor.java" }, + { + "raw": "could not attach port forwarding rule with allowedCidr, because vmNic[uuid:%s] already has rules that overlap the target private port ranges[%s, %s] and have the same protocol type[%s]", + "en_US": "could not attach port forwarding rule with allowedCidr, because vmNic[uuid:{0}] already has rules that overlap the target private port ranges[{1}, {2}] and have the same protocol type[{3}]", + "zh_CN": "不允许使用 AllowedCIDR 规则添加端口转发,因为 vmnic[uuid:{0}] 已经有重叠的端口转发规则[私网端口范围:{1}, {2}],且协议类型相同[{3}]", + "arguments": [ + "vmNicUuid", + "privatePortStart", + "privatePortEnd", + "protocolType" + ], + "fileName": "src/main/java/org/zstack/network/service/portforwarding/PortForwardingApiInterceptor.java" + }, + { + "raw": "could not attach port forwarding rule, because vmNic[uuid:%s] already has a rule that overlaps the target private port ranges[%s, %s], has the same protocol type[%s] and has AllowedCidr", + "en_US": "could not attach port forwarding rule, because vmNic[uuid:{0}] already has a rule that overlaps the target private port ranges[{1}, {2}], has the same protocol type[{3}] and has AllowedCidr", + "zh_CN": "不允许添加端口转发,因为 vmnic[uuid:{0}] 已经有重叠的端口转发规则[私网端口范围:{1}, {2}], 且协议类型相同[{3}]", + "arguments": [ + "vmNicUuid", + "privatePortStart", + "privatePortEnd", + "protocolType" + ], + "fileName": 
"src/main/java/org/zstack/network/service/portforwarding/PortForwardingApiInterceptor.java" + }, { "raw": "unable to create port forwarding rule, extension[%s] refused it because %s", "en_US": "unable to create port forwarding rule, extension[{0}] refused it because {1}", diff --git a/conf/i18n_json/i18n_portal.json b/conf/i18n_json/i18n_portal.json index 157305df925..a6af38e67bd 100644 --- a/conf/i18n_json/i18n_portal.json +++ b/conf/i18n_json/i18n_portal.json @@ -17,6 +17,15 @@ ], "fileName": "src/main/java/org/zstack/portal/apimediator/ApiMediatorImpl.java" }, + { + "raw": "resourceUuid[%s] is not a valid uuid. A valid uuid is a UUID(v4 recommended) with \u0027-\u0027 stripped. see http://en.wikipedia.org/wiki/Universally_unique_identifier for format of UUID, the regular expression uses to validate a UUID is \u0027[0-9a-f]{8}[0-9a-f]{4}[1-5][0-9a-f]{3}[89ab][0-9a-f]{3}[0-9a-f]{12}\u0027", + "en_US": "resourceUuid[{0}] is not a valid uuid. A valid uuid is a UUID(v4 recommended) with \u0027-\u0027 stripped. 
see http://en.wikipedia.org/wiki/Universally_unique_identifier for format of UUID, the regular expression uses to validate a UUID is \u0027[0-9a-f]{8}[0-9a-f]{4}[1-5][0-9a-f]{3}[89ab][0-9a-f]{3}[0-9a-f]{12}\u0027", + "zh_CN": "", + "arguments": [ + "cmsg.getResourceUuid()" + ], + "fileName": "src/main/java/org/zstack/portal/apimediator/ApiMediatorImpl.java" + }, { "raw": "invalid value[%s] of field[%s]", "en_US": "invalid value[{0}] of field[{1}]", diff --git a/conf/i18n_json/i18n_resourceconfig.json b/conf/i18n_json/i18n_resourceconfig.json index 74f8390f217..77e3edc8c6d 100644 --- a/conf/i18n_json/i18n_resourceconfig.json +++ b/conf/i18n_json/i18n_resourceconfig.json @@ -17,6 +17,17 @@ ], "fileName": "src/main/java/org/zstack/resourceconfig/ResourceConfig.java" }, + { + "raw": "ResourceConfig [category:%s, name:%s] cannot bind to resourceType: %s", + "en_US": "ResourceConfig [category:{0}, name:{1}] cannot bind to resourceType: {2}", + "zh_CN": "资源配置[类别:{0},名称:{1}]无法绑定资源类型:{2}", + "arguments": [ + "globalConfig.getCategory()", + "globalConfig.getName()", + "resourceType" + ], + "fileName": "src/main/java/org/zstack/resourceconfig/ResourceConfig.java" + }, { "raw": "no global config[category:%s, name:%s] found", "en_US": "no global config[category:{0}, name:{1}] found", diff --git a/conf/i18n_json/i18n_rest.json b/conf/i18n_json/i18n_rest.json index 9ab12b1f424..1748f0c16fe 100644 --- a/conf/i18n_json/i18n_rest.json +++ b/conf/i18n_json/i18n_rest.json @@ -8,5 +8,15 @@ "source" ], "fileName": "src/main/java/org/zstack/rest/TypeVerifier.java" + }, + { + "raw": "Invalid value for boolean field [%s], [%s] is not a valid boolean string[true, false].", + "en_US": "Invalid value for boolean field [{0}], [{1}] is not a valid boolean string[true, false].", + "zh_CN": "[{0}] 属性值无效,[{1}] 不是一个有效的布尔值字符串[true, false]", + "arguments": [ + "f.getName()", + "source" + ], + "fileName": "src/main/java/org/zstack/rest/TypeVerifier.java" + } ] \ No newline at end of file diff --git 
a/conf/i18n_json/i18n_search.json b/conf/i18n_json/i18n_search.json index 7230c22ea67..7519d708cb1 100644 --- a/conf/i18n_json/i18n_search.json +++ b/conf/i18n_json/i18n_search.json @@ -30,6 +30,17 @@ ], "fileName": "src/main/java/org/zstack/query/MysqlQueryBuilderImpl3.java" }, + { + "raw": "field[%s] is not a primitive of the inventory %s; you cannot specify it in the parameter \u0027fields\u0027;valid fields are %s", + "en_US": "field[{0}] is not a primitive of the inventory {1}; you cannot specify it in the parameter \u0027fields\u0027;valid fields are {2}", + "zh_CN": "字段[{0}]不是清单[{1}]的基元类型,不能在参数\u0027fields\u0027中指定", + "arguments": [ + "f", + "info.inventoryClass.getSimpleName()", + "info.premitiveFieldNames" + ], + "fileName": "src/main/java/org/zstack/query/MysqlQueryBuilderImpl3.java" + }, { "raw": "filterName must be formatted as [filterType:condition(s)]", "en_US": "filterName must be formatted as [filterType:condition(s)]", diff --git a/conf/i18n_json/i18n_sftpBackupStorage.json b/conf/i18n_json/i18n_sftpBackupStorage.json index bbac5f54bb9..50faeaf2e51 100644 --- a/conf/i18n_json/i18n_sftpBackupStorage.json +++ b/conf/i18n_json/i18n_sftpBackupStorage.json @@ -29,6 +29,16 @@ ], "fileName": "src/main/java/org/zstack/storage/backup/sftp/SftpBackupStorage.java" }, + { + "raw": "the uuid of sftpBackupStorage agent changed[expected:%s, actual:%s], it\u0027s most likely the agent was manually restarted. 
Issue a reconnect to sync the status", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "ret.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/backup/sftp/SftpBackupStorage.java" + }, { "raw": "unable to connect to SimpleHttpBackupStorage[url:%s], because %s", "en_US": "unable to connect to SimpleHttpBackupStorage[url:{0}], because {1}", diff --git a/conf/i18n_json/i18n_sharedMountPointPrimaryStorage.json b/conf/i18n_json/i18n_sharedMountPointPrimaryStorage.json index 8029b65ef3c..1a58fdb73bc 100644 --- a/conf/i18n_json/i18n_sharedMountPointPrimaryStorage.json +++ b/conf/i18n_json/i18n_sharedMountPointPrimaryStorage.json @@ -8,6 +8,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/smp/KvmBackend.java" }, + { + "raw": "cannot find any connected host to perform the operation, it seems all KVM hosts in the clusters attached with the shared mount point storage[uuid:%s] are disconnected", + "en_US": "cannot find any connected host to perform the operation, it seems all KVM hosts in the clusters attached with the shared mount point storage[uuid:{0}] are disconnected", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/smp/KvmBackend.java" + }, { "raw": "vm[uuid:%s] is not Running, Paused or Stopped, current state[%s]", "en_US": "vm[uuid:{0}] is not Running, Paused or Stopped, current state[{1}]", @@ -29,6 +38,17 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/smp/KvmBackend.java" }, + { + "raw": "the image[uuid:%s, name: %s] is not available to download on any backup storage:\\n1. check if image is in status of Deleted\\n2. check if the backup storage on which the image is shown as Ready is attached to the zone[uuid:%s]", + "en_US": "the image[uuid:{0}, name: {1}] is not available to download on any backup storage:\\n1. check if image is in status of Deleted\\n2. 
check if the backup storage on which the image is shown as Ready is attached to the zone[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "imgInv.getUuid()", + "imgInv.getName()", + "self.getZoneUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/smp/KvmBackend.java" + }, { "raw": "cannot find backup storage[uuid:%s]", "en_US": "cannot find backup storage[uuid:{0}]", @@ -77,6 +97,26 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/smp/KvmBackend.java" }, + { + "raw": "the shared mount point primary storage[uuid:%s, name:%s] cannot find any available host in attached clusters for instantiating the volume", + "en_US": "the shared mount point primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for instantiating the volume", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/smp/SMPPrimaryStorageBase.java" + }, + { + "raw": "the SMP primary storage[uuid:%s, name:%s] has not attached to any clusters, or no hosts in the attached clusters are connected", + "en_US": "the SMP primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/smp/SMPPrimaryStorageBase.java" + }, { "raw": "not supported operation", "en_US": "not supported operation", @@ -94,6 +134,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/smp/SMPPrimaryStorageBase.java" }, + { + "raw": "cannot find available host for operation on primary storage[uuid:%s].", + "en_US": "cannot find available host for operation on primary storage[uuid:{0}].", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/smp/SMPPrimaryStorageBase.java" + }, { "raw": "host where vm[uuid:%s] locate is not Connected.", "en_US": "host 
where vm[uuid:{0}] locate is not Connected.", @@ -103,6 +152,27 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/smp/SMPPrimaryStorageBase.java" }, + { + "raw": "volume[uuid:%s] has reference volume[%s], can not change volume type before flatten them and their descendants", + "en_US": "volume[uuid:{0}] has reference volume[{1}], can not change volume type before flatten them and their descendants", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "infos.toString()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/smp/SMPPrimaryStorageBase.java" + }, + { + "raw": "the SMP primary storage[uuid:%s] is not attached to any clusters, and cannot expunge the root volume[uuid:%s] of the VM[uuid:%s]", + "en_US": "the SMP primary storage[uuid:{0}] is not attached to any clusters, and cannot expunge the root volume[uuid:{1}] of the VM[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "psUuid", + "vmUuid", + "volumeUuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/smp/SMPPrimaryStorageFactory.java" + }, { "raw": "cannot find a Connected host to execute command for smp primary storage[uuid:%s]", "en_US": "cannot find a Connected host to execute command for smp primary storage[uuid:{0}]", diff --git a/conf/i18n_json/i18n_sharedblock.json b/conf/i18n_json/i18n_sharedblock.json index 53ee03bf156..b9a1eb8b806 100644 --- a/conf/i18n_json/i18n_sharedblock.json +++ b/conf/i18n_json/i18n_sharedblock.json @@ -9,6 +9,32 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/HaSanlockHostChecker.java" }, + { + "raw": "can not find volume need to operate shared block group primary storage", + "en_US": "can not find volume need to operate shared block group primary storage", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/KvmAgentCommandDispatcher.java" + }, + { + "raw": "KVM host which volume[uuid%s] attached disconnected with the shared block group storage[uuid:%s]", + "en_US": "KVM 
host which volume[uuid{0}] attached disconnected with the shared block group storage[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "volumeInventory.getUuid()", + "primaryStorageUuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/KvmAgentCommandDispatcher.java" + }, + { + "raw": "cannot find any connected host to perform the operation, it seems all KVM hosts in the clusters attached with the shared block group storage[uuid:%s] are disconnected", + "en_US": "cannot find any connected host to perform the operation, it seems all KVM hosts in the clusters attached with the shared block group storage[uuid:{0}] are disconnected", + "zh_CN": "", + "arguments": [ + "primaryStorageUuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/KvmAgentCommandDispatcher.java" + }, { "raw": "templated vm[uuid: %s] cannot be create from vm with scsi lun[uuids: %s]", "en_US": "templated vm[uuid: {0}] cannot be create from vm with scsi lun[uuids: {1}]", @@ -29,6 +55,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockApiInterceptor.java" }, + { + "raw": "the vm[uuid: %s] does not has additional qmp socket, it may because of the vm start without the global config[vm.additionalQmp] enabled, please make sure it enabled and reboot vm in zstack", + "en_US": "the vm[uuid: {0}] does not has additional qmp socket, it may because of the vm start without the global config[vm.additionalQmp] enabled, please make sure it enabled and reboot vm in zstack", + "zh_CN": "", + "arguments": [ + "msg.getVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockApiInterceptor.java" + }, { "raw": "must specify at least one disk when add shared block group primary storage", "en_US": "must specify at least one disk when add shared block group primary storage", @@ -36,6 +71,18 @@ "arguments": [], "fileName": 
"src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockApiInterceptor.java" }, + { + "raw": "shared block[uuid:%s, diskUuid:%s, description:%s] already added to shared block group[uuid:%s]in new shared block group", + "en_US": "shared block[uuid:{0}, diskUuid:{1}, description:{2}] already added to shared block group[uuid:{3}]in new shared block group", + "zh_CN": "", + "arguments": [ + "vo.getUuid()", + "vo.getDiskUuid()", + "vo.getDescription()", + "vo.getSharedBlockGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockApiInterceptor.java" + }, { "raw": "shared volume[uuid: %s] on shared block group primary storage can not resize", "en_US": "shared volume[uuid: {0}] on shared block group primary storage can not resize", @@ -64,6 +111,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockApiInterceptor.java" }, + { + "raw": "use the thick provisioning volume as the cache volume. the preparation of the volume[%s] is %s", + "en_US": "use the thick provisioning volume as the cache volume. 
the preparation of the volume[{0}] is {1}", + "zh_CN": "", + "arguments": [ + "msg.getVolumeUuid()", + "volumeProvisioningStrategy" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockApiInterceptor.java" + }, { "raw": "the scsi lun[uuid: %s, wwid: %s] is already attach to primary storage[uuid: %s]", "en_US": "the scsi lun[uuid: {0}, wwid: {1}] is already attach to primary storage[uuid: {2}]", @@ -75,6 +132,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockApiInterceptor.java" }, + { + "raw": "the shared mount point primary storage[uuid:%s, name:%s] cannot find any available host in attached clusters for instantiating the volume", + "en_US": "the shared mount point primary storage[uuid:{0}, name:{1}] cannot find any available host in attached clusters for instantiating the volume", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageBase.java" + }, { "raw": "can not found any cluster attached on shared block group primary storage[uuid: %S]", "en_US": "can not found any cluster attached on shared block group primary storage[uuid: %S]", @@ -84,6 +151,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageBase.java" }, + { + "raw": "the shared block group primary storage[uuid:%s, name:%s] has not attached to any clusters, or no hosts in the attached clusters are connected", + "en_US": "the shared block group primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageBase.java" + }, { "raw": "failed to connect to all clusters%s", "en_US": "failed to connect to all clusters{0}", @@ -93,6 +170,16 @@ ], 
"fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageBase.java" }, + { + "raw": "the SharedBlock primary storage[uuid:%s, name:%s] has not attached to any clusters, or no hosts in the attached clusters are connected", + "en_US": "the SharedBlock primary storage[uuid:{0}, name:{1}] has not attached to any clusters, or no hosts in the attached clusters are connected", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageBase.java" + }, { "raw": "cannot find volume snapshot[uuid:%s]", "en_US": "cannot find volume snapshot[uuid:{0}]", @@ -102,6 +189,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageBase.java" }, + { + "raw": "cannot find available host for operation on primary storage[uuid:%s].", + "en_US": "cannot find available host for operation on primary storage[uuid:{0}].", + "zh_CN": "", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageBase.java" + }, { "raw": "host where vm[uuid:%s] locate is not Connected.", "en_US": "host where vm[uuid:{0}] locate is not Connected.", @@ -162,6 +258,26 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageFactory.java" }, + { + "raw": "cannot find a host which has connected shared block to execute command for shared block group primary storage[uuid:%s]", + "en_US": "cannot find a host which has connected shared block to execute command for shared block group primary storage[uuid:{0}]", + "zh_CN": "", + "arguments": [ + "pri.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageFactory.java" + }, + { + "raw": "the host[uuid: %s] running on is not available to resize volume[uuid: %s] on shared block group primary 
storage[uuid: %s]", + "en_US": "the host[uuid: {0}] running on is not available to resize volume[uuid: {1}] on shared block group primary storage[uuid: {2}]", + "zh_CN": "", + "arguments": [ + "vmvo.getHostUuid()", + "volumeInventory.getUuid()", + "volumeInventory.getPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockGroupPrimaryStorageFactory.java" + }, { "raw": "primary storage[uuid:%s] not found", "en_US": "primary storage[uuid:{0}] not found", @@ -189,6 +305,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockImageStoreBackend.java" }, + { + "raw": "can not find qualified kvm host for shared block group primary storage[uuid: %s]", + "en_US": "can not find qualified kvm host for shared block group primary storage[uuid: {0}]", + "zh_CN": "", + "arguments": [ + "msg.getPsUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockImageStoreKvmBackend.java" + }, { "raw": "shared volume not support thin provisioning", "en_US": "shared volume not support thin provisioning", @@ -226,6 +351,17 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" }, + { + "raw": "the image[uuid:%s, name: %s] is not available to download on any backup storage:\\n1. check if image is in status of Deleted\\n2. check if the backup storage on which the image is shown as Ready is attached to the zone[uuid:%s]", + "en_US": "the image[uuid:{0}, name: {1}] is not available to download on any backup storage:\\n1. check if image is in status of Deleted\\n2. 
check if the backup storage on which the image is shown as Ready is attached to the zone[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "img.getUuid()", + "img.getName()", + "self.getZoneUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" + }, { "raw": "the image[uuid: %s, name:%s] is not found on any backup storage", "en_US": "the image[uuid: {0}, name:{1}] is not found on any backup storage", @@ -305,6 +441,16 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" }, + { + "raw": "can not find any available host to take snapshot for volume[uuid: %s] on shared block group primary storage[uuid: %s]", + "en_US": "can not find any available host to take snapshot for volume[uuid: {0}] on shared block group primary storage[uuid: {1}]", + "zh_CN": "", + "arguments": [ + "msg.getVolumeUuid()", + "msg.getTargetPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" + }, { "raw": "only support full", "en_US": "only support full", @@ -312,6 +458,47 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" }, + { + "raw": "can not find any available host to migrate volume[uuid: %s] between shared block group primary storage[uuid: %s] and [uuid: %s]", + "en_US": "can not find any available host to migrate volume[uuid: {0}] between shared block group primary storage[uuid: {1}] and [uuid: {2}]", + "zh_CN": "", + "arguments": [ + "msg.getMigrateVolumeStructs().get(0).volumeUuid", + "msg.getPrimaryStorageUuid()", + "msg.getTargetPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" + }, + { + "raw": "can not find any available host to migrate for volume[uuid: %s] on shared block group primary storage[uuid: %s] and [uuid: %s]", + "en_US": "can not find any available host to migrate for 
volume[uuid: {0}] on shared block group primary storage[uuid: {1}] and [uuid: {2}]", + "zh_CN": "", + "arguments": [ + "msg.getMigrateVolumeStructs().get(0).volumeUuid", + "msg.getTargetPrimaryStorageUuid()", + "msg.getTargetPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" + }, + { + "raw": "can not find hosts both connect to primary storage[uuid: %s] and primary storage[uuid: %s]", + "en_US": "can not find hosts both connect to primary storage[uuid: {0}] and primary storage[uuid: {1}]", + "zh_CN": "", + "arguments": [ + "msg.getPrimaryStorageUuid()", + "msg.getTargetPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" + }, + { + "raw": "cannot find any connected host to perform the operation, it seems all KVM hosts attached with the shared block group storage[uuid:%s] are disconnected", + "en_US": "cannot find any connected host to perform the operation, it seems all KVM hosts attached with the shared block group storage[uuid:{0}] are disconnected", + "zh_CN": "", + "arguments": [ + "msg.getPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" + }, { "raw": "operation error, because:%s", "en_US": "operation error, because:{0}", @@ -369,6 +556,15 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" }, + { + "raw": "deactive installPath failed, because %s", + "en_US": "deactive installPath failed, because {0}", + "zh_CN": "", + "arguments": [ + "errorCodeList.getCauses().toString()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/SharedBlockKvmBackend.java" + }, { "raw": "invalid thinProvisioningInitializeSize tag", "en_US": "invalid thinProvisioningInitializeSize tag", @@ -423,6 +619,40 @@ ], "fileName": 
"src/main/java/org/zstack/storage/primary/sharedblock/migration/SblkToSblkMigrateVolumeFlow.java" }, + { + "raw": "cannot find the image[uuid:%s] in any connected backup storage attached to the zone[uuid:%s]. check below:\\n1. whether the backup storage is attached to the zone[uuid:%s]\\n2. whether the backup storage is in connected status; try to reconnect it if not", + "en_US": "cannot find the image[uuid:{0}] in any connected backup storage attached to the zone[uuid:{1}]. check below:\\n1. whether the backup storage is attached to the zone[uuid:{2}]\\n2. whether the backup storage is in connected status; try to reconnect it if not", + "zh_CN": "", + "arguments": [ + "imageUuid", + "zoneUuid", + "zoneUuid" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/migration/SblkToSblkMigrateVolumeFlow.java" + }, + { + "raw": "there are not enough capacity for image[uuid: %s] download while volume[uuid: %s] storage migration, required capacity: %s, current available physical capacity: %s", + "en_US": "there are not enough capacity for image[uuid: {0}] download while volume[uuid: {1}] storage migration, required capacity: {2}, current available physical capacity: {3}", + "zh_CN": "", + "arguments": [ + "image.getUuid()", + "volumeUuid", + "image.getActualSize()", + "dstPsInv.getAvailablePhysicalCapacity()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/sharedblock/migration/SblkToSblkMigrateVolumeFlow.java" + }, + { + "raw": "there are not enough capacity for volume[uuid: %s] storage migration, required capacity: %s, current available physical capacity: %s", + "en_US": "there are not enough capacity for volume[uuid: {0}] storage migration, required capacity: {1}, current available physical capacity: {2}", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "volumeVO.getActualSize()", + "dstPsInv.getAvailablePhysicalCapacity()" + ], + "fileName": 
"src/main/java/org/zstack/storage/primary/sharedblock/migration/SblkToSblkMigrateVolumeFlow.java" + }, { "raw": "data on source ps[uuid: %s] has been discarded, not support rollback", "en_US": "data on source ps[uuid: {0}] has been discarded, not support rollback", diff --git a/conf/i18n_json/i18n_slb.json b/conf/i18n_json/i18n_slb.json index 195d889e4af..eabd99071f0 100644 --- a/conf/i18n_json/i18n_slb.json +++ b/conf/i18n_json/i18n_slb.json @@ -1,4 +1,40 @@ [ + { + "raw": "could not create slb instance because there is no load balancer slb group [uuid:%s]", + "en_US": "could not create slb instance because there is no load balancer slb group [uuid:{0}]", + "zh_CN": "", + "arguments": [ + "msg.getSlbGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not create slb instance because there is no slb offering configured for slb group [uuid:%s]", + "en_US": "could not create slb instance because there is no slb offering configured for slb group [uuid:{0}]", + "zh_CN": "", + "arguments": [ + "msg.getSlbGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not create slb instance because image uuid of slb offering [uuid:%s] is null", + "en_US": "could not create slb instance because image uuid of slb offering [uuid:{0}] is null", + "zh_CN": "", + "arguments": [ + "msg.getSlbGroupUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not create slb instance because image [uuid:%s] is deleted", + "en_US": "could not create slb instance because image [uuid:{0}] is deleted", + "zh_CN": "", + "arguments": [ + "slbOfferingVO.getImageUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, { "raw": "could not create slb group because invalid front l3 network type %s", "en_US": "could not create slb group because invalid 
front l3 network type {0}", @@ -15,6 +51,18 @@ "arguments": [], "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" }, + { + "raw": "could not execute the api operation. front network [uuid:%s] cidr [%s] is overlapped with management l3 network[uuid:%s] cidr [%s]", + "en_US": "could not execute the api operation. front network [uuid:{0}] cidr [{1}] is overlapped with management l3 network[uuid:{2}] cidr [{3}]", + "zh_CN": "", + "arguments": [ + "frontL3Uuid", + "frontL3Cidr", + "mgmtL3Uuid", + "mgmtL3Cidr" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, { "raw": "could not create slb group, because backend network doesn\u0027t support ipv6 yet", "en_US": "could not create slb group, because backend network doesn\u0027t support ipv6 yet", @@ -22,6 +70,117 @@ "arguments": [], "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] cidr [%s] is overlapped with frond l3 network[uuid:%s] cidr [%s]", + "en_US": "could not execute the api operation. backend network [uuid:{0}] cidr [{1}] is overlapped with frond l3 network[uuid:{2}] cidr [{3}]", + "zh_CN": "", + "arguments": [ + "uuid", + "backendL3Cidr", + "frontL3Uuid", + "frontL3Cidr" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] cidr [%s] is overlapped with management l3 network[uuid:%s] cidr [%s]", + "en_US": "could not execute the api operation. backend network [uuid:{0}] cidr [{1}] is overlapped with management l3 network[uuid:{2}] cidr [{3}]", + "zh_CN": "", + "arguments": [ + "uuid", + "backendL3Cidr", + "mgmtL3Uuid", + "mgmtL3Cidr" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. 
frontend network [uuid:%s] is not connected vpc router", + "en_US": "could not execute the api operation. frontend network [uuid:{0}] is not connected vpc router", + "zh_CN": "", + "arguments": [ + "frontL3.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] must be vpc network because frond l3 network is vpc network", + "en_US": "could not execute the api operation. backend network [uuid:{0}] must be vpc network because frond l3 network is vpc network", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] is not connected vpc router", + "en_US": "could not execute the api operation. backend network [uuid:{0}] is not connected vpc router", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] is connected vpc router [uuid:%s] while front network is connected to vpc router[uuid:%s]", + "en_US": "could not execute the api operation. backend network [uuid:{0}] is connected vpc router [uuid:{1}] while front network is connected to vpc router[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "uuid", + "backendVrUuids.get(0)", + "frontVrUuids.get(0)" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] must be private flat network because frond l3 network is private flat network", + "en_US": "could not execute the api operation. 
backend network [uuid:{0}] must be private flat network because frond l3 network is private flat network", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] is connected vpc router [uuid:%s] which is not connect to front network[uuid:%s]", + "en_US": "could not execute the api operation. backend network [uuid:{0}] is connected vpc router [uuid:{1}] which is not connect to front network[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "backendL3Uuids.get(0)", + "firstBackendVrUuids.get(0)", + "frontL3Uuid" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] must be vpc network because other backend network is vpc network", + "en_US": "could not execute the api operation. backend network [uuid:{0}] must be vpc network because other backend network is vpc network", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] is connected vpc router [uuid:%s] while other backend network is connected to vpc router[uuid:%s]", + "en_US": "could not execute the api operation. backend network [uuid:{0}] is connected vpc router [uuid:{1}] while other backend network is connected to vpc router[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "uuid", + "bVrUuids.get(0)", + "firstBackendVrUuids.get(0)" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not execute the api operation. backend network [uuid:%s] can not be vpc network because other backend network is not vpc network", + "en_US": "could not execute the api operation. 
backend network [uuid:{0}] can not be vpc network because other backend network is not vpc network", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, { "raw": "could not create slb group because invalid deploy type %s", "en_US": "could not create slb group because invalid deploy type {0}", @@ -40,6 +199,64 @@ ], "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" }, + { + "raw": "can not attach l3 network [uuid:%s] to SLB instance, because ipv4 address[%s] format error", + "en_US": "can not attach l3 network [uuid:{0}] to SLB instance, because ipv4 address[{1}] format error", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()", + "ip4" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "can not attach l3 network [uuid:%s] to SLB instance, because ipv4 netmask[%s] format error", + "en_US": "can not attach l3 network [uuid:{0}] to SLB instance, because ipv4 netmask[{1}] format error", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()", + "ip4" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "can not attach l3 network [uuid:%s] to SLB instance, because ip address and netmask must be set in systemTag", + "en_US": "can not attach l3 network [uuid:{0}] to SLB instance, because ip address and netmask must be set in systemTag", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "can not attach l3 network [uuid:%s] to SLB instance, because ipv6 address[%s] format error", + "en_US": "can not attach l3 network [uuid:{0}] to SLB instance, because ipv6 address[{1}] format error", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()", + "map.get(VmSystemTags.STATIC_IP_TOKEN)" + ], + "fileName": 
"src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "can not attach l3 network [uuid:%s] to SLB instance, because ipv6 prefix[%s] format error", + "en_US": "can not attach l3 network [uuid:{0}] to SLB instance, because ipv6 prefix[{1}] format error", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()", + "prefix" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "can not attach l3 network [uuid:%s] to SLB instance, because ip address and prefix must be set in systemTag", + "en_US": "can not attach l3 network [uuid:{0}] to SLB instance, because ip address and prefix must be set in systemTag", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, { "raw": "can not detach front end l3 network [uuid:%s] from SLB instance", "en_US": "can not detach front end l3 network [uuid:{0}] from SLB instance", @@ -60,6 +277,35 @@ ], "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" }, + { + "raw": "can not detach nic [uuid:%s] from SLB instance, because it is the last backend l3 network nic", + "en_US": "can not detach nic [uuid:{0}] from SLB instance, because it is the last backend l3 network nic", + "zh_CN": "", + "arguments": [ + "msg.getVmNicUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "can not create load balancer because vip [uuid:%s] has attached other network service [%s]", + "en_US": "can not create load balancer because vip [uuid:{0}] has attached other network service [{1}]", + "zh_CN": "", + "arguments": [ + "msg.getVipUuid()", + "vipVO.getServicesTypes()" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "can not create load balancer because vip [uuid:%s] has attached to vpc router [%s]", + "en_US": "can not create 
load balancer because vip [uuid:{0}] has attached to vpc router [{1}]", + "zh_CN": "", + "arguments": [ + "msg.getVipUuid()", + "vrUuids" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, { "raw": "can not create load balancer because invalid slb group [uuid:%s]", "en_US": "can not create load balancer because invalid slb group [uuid:{0}]", @@ -78,6 +324,41 @@ ], "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" }, + { + "raw": "could not add vmnic to load balancer server group because l3 network [uuid:%s] is connected any vpc router", + "en_US": "could not add vmnic to load balancer server group because l3 network [uuid:{0}] is connected any vpc router", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not add vmnic to load balancer server group because l3 network[uuid:%s] is connected to different vpc router", + "en_US": "could not add vmnic to load balancer server group because l3 network[uuid:{0}] is connected to different vpc router", + "zh_CN": "", + "arguments": [ + "uuid" + ], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "could not add vmnic to load balancer server group because l3 network is not connected slb instance", + "en_US": "could not add vmnic to load balancer server group because l3 network is not connected slb instance", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/network/service/slb/SlbApiInterceptor.java" + }, + { + "raw": "can not find nic of slb instance [uuid:%s] which is attached to slb group front l3 network [uuid:%s]", + "en_US": "can not find nic of slb instance [uuid:{0}] which is attached to slb group front l3 network [uuid:{1}]", + "zh_CN": "", + "arguments": [ + "slbInstance.getUuid()", + "frontL3Uuid" + ], + "fileName": 
"src/main/java/org/zstack/network/service/slb/SlbCreatePublicVipFlow.java" + }, { "raw": "failed to create vip%s on virtual router[uuid:%s], because %s", "en_US": "failed to create vip{0} on virtual router[uuid:{1}], because {2}", diff --git a/conf/i18n_json/i18n_snmp.json b/conf/i18n_json/i18n_snmp.json index 59a6558b73e..449925eed77 100644 --- a/conf/i18n_json/i18n_snmp.json +++ b/conf/i18n_json/i18n_snmp.json @@ -27,6 +27,60 @@ "arguments": [], "fileName": "src/main/java/org/zstack/snmp/SnmpApiInterceptor.java" }, + { + "raw": "Failed to %s SNMP agent, because readCommunity can not be empty when version is v2c", + "en_US": "Failed to {0} SNMP agent, because readCommunity can not be empty when version is v2c", + "zh_CN": "对 SNMP 的 {0} 操作失败,当版本为 v2c 时 readCommunity 不能为空", + "arguments": [ + "operation" + ], + "fileName": "src/main/java/org/zstack/snmp/SnmpApiInterceptor.java" + }, + { + "raw": "Failed to %s SNMP agent, because userName can not be empty when version is v3", + "en_US": "Failed to {0} SNMP agent, because userName can not be empty when version is v3", + "zh_CN": "对 SNMP 的 {0} 操作失败,当版本为 v3 时 userName 不能为空", + "arguments": [ + "operation" + ], + "fileName": "src/main/java/org/zstack/snmp/SnmpApiInterceptor.java" + }, + { + "raw": "Failed to %s SNMP agent, auth algorithm can not be null when password is not null.", + "en_US": "Failed to {0} SNMP agent, auth algorithm can not be null when password is not null.", + "zh_CN": "对 SNMP 的 {0} 操作失败,当密码不为空时,认证算法不能为空", + "arguments": [ + "operation" + ], + "fileName": "src/main/java/org/zstack/snmp/SnmpApiInterceptor.java" + }, + { + "raw": "Failed to %s SNMP agent, because auth password can not be empty.", + "en_US": "Failed to {0} SNMP agent, because auth password can not be empty.", + "zh_CN": "对 SNMP 的 {0} 操作失败,密码不能为空", + "arguments": [ + "operation" + ], + "fileName": "src/main/java/org/zstack/snmp/SnmpApiInterceptor.java" + }, + { + "raw": "Failed to %s SNMP agent, because setting data encryption requires 
setting user verification first.", + "en_US": "Failed to {0} SNMP agent, because setting data encryption requires setting user verification first.", + "zh_CN": "对 SNMP 的 {0} 操作失败,当设置数据加密时,需要先设置用户验证", + "arguments": [ + "operation" + ], + "fileName": "src/main/java/org/zstack/snmp/SnmpApiInterceptor.java" + }, + { + "raw": "Failed to %s SNMP agent, because privacy password can not be empty.", + "en_US": "Failed to {0} SNMP agent, because privacy password can not be empty.", + "zh_CN": "对 SNMP 的 {0} 操作失败,privacy password 不能为空", + "arguments": [ + "operation" + ], + "fileName": "src/main/java/org/zstack/snmp/SnmpApiInterceptor.java" + }, { "raw": "can\u0027t get SnmpAgentImpl instance, due to no SnmpAgentVO exist.", "en_US": "can\u0027t get SnmpAgentImpl instance, due to no SnmpAgentVO exist.", diff --git a/conf/i18n_json/i18n_software-package-plugin.json b/conf/i18n_json/i18n_software-package-plugin.json index 384be73f4e9..00bcfb0e496 100644 --- a/conf/i18n_json/i18n_software-package-plugin.json +++ b/conf/i18n_json/i18n_software-package-plugin.json @@ -53,6 +53,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/softwarePackage/ShellCommandUtils.java" }, + { + "raw": "Invalid install path detected: %s. Paths must only contain letters, numbers, underscores, dashes, colons, spaces, dots and slashes. Path traversal sequences (.. and //) are not allowed. Path must be absolute. Path must not be root path", + "en_US": "Invalid install path detected: {0}. Paths must only contain letters, numbers, underscores, dashes, colons, spaces, dots and slashes. Path traversal sequences (.. and //) are not allowed. Path must be absolute. Path must not be root path", + "zh_CN": "创建软件包时,路径包含非法字符。路径只能包含字母、数字、下划线、中划线、冒号、空格、点、斜杠。路径不能包含 .. 或 //。路径必须为绝对路径。路径不能为根路径", + "arguments": [ + "path" + ], + "fileName": "src/main/java/org/zstack/softwarePackage/SoftwarePackageApiInterceptor.java" + }, { "raw": "software package [%s] cannot be installed in current state [%s]. 
Allowed states: %s or %s.", "en_US": "software package [{0}] cannot be installed in current state [{1}]. Allowed states: {2} or {3}.", @@ -76,6 +85,24 @@ ], "fileName": "src/main/java/org/zstack/softwarePackage/SoftwarePackageApiInterceptor.java" }, + { + "raw": "failed to identify software package type. package: %s, installPath: %s, unzipPath: %s. please verify the package format is correct and a corresponding extension point is registered.", + "en_US": "failed to identify software package type. package: {0}, installPath: {1}, unzipPath: {2}. please verify the package format is correct and a corresponding extension point is registered.", + "zh_CN": "创建软件包时,无法识别软件包的类型。软件包:{0}, 安装路径:{1}, 解压路径:{2}。请确认软件包的格式正确,并且已注册相应的扩展点。", + "arguments": [ + "finalVo.getName()", + "finalVo.getInstallPath()", + "finalVo.getUnzipInstallPath()" + ], + "fileName": "src/main/java/org/zstack/softwarePackage/SoftwarePackagePackageManagerImpl.java" + }, + { + "raw": "a non-management node installation of the software package is detected in this environment. to proceed with a new management node-based installation, please first:\\n1. uninstall the existing manually installed components\\n2. ensure the environment is completely clean\\nnote: this installation must be performed exclusively through the management node", + "en_US": "a non-management node installation of the software package is detected in this environment. to proceed with a new management node-based installation, please first:\\n1. uninstall the existing manually installed components\\n2. ensure the environment is completely clean\\nnote: this installation must be performed exclusively through the management node", + "zh_CN": "创建软件包时,检测到非管理节点安装。要继续使用管理节点进行新安装,请首先:\\n1. 卸载现有的手动安装组件\\n2. 
确保环境是干净的\\n注意:此安装必须仅通过管理节点进行", + "arguments": [], + "fileName": "src/main/java/org/zstack/softwarePackage/SoftwarePackagePackageManagerImpl.java" + }, { "raw": "no extension point found for software package type: %s", "en_US": "no extension point found for software package type: {0}", diff --git a/conf/i18n_json/i18n_storage-device.json b/conf/i18n_json/i18n_storage-device.json index 792a835d38e..1adf0579c68 100644 --- a/conf/i18n_json/i18n_storage-device.json +++ b/conf/i18n_json/i18n_storage-device.json @@ -232,6 +232,27 @@ ], "fileName": "src/main/java/org/zstack/storage/device/StorageDeviceManagerImpl.java" }, + { + "raw": "different iscsi configuration were found on host[uuid:%s, targets:%s]and host[uuid:%s, targets:%s]", + "en_US": "different iscsi configuration were found on host[uuid:{0}, targets:{1}]and host[uuid:{2}, targets:{3}]", + "zh_CN": "在主机 [uuid:{0}, targets:{1}] 和主机 [uuid:{2}, targets:{3}] 中发现了不同的 iscsi 配置", + "arguments": [ + "scannedServer.getKey()", + "JSONObjectUtil.toJsonString(scannedTargets)", + "hostVO.getUuid()", + "JSONObjectUtil.toJsonString(returnValue.getIscsiTargets())" + ], + "fileName": "src/main/java/org/zstack/storage/device/StorageDeviceManagerImpl.java" + }, + { + "raw": "different disk types are found in different hosts for lun[serial:%s], unable to attach it to cluster", + "en_US": "different disk types are found in different hosts for lun[serial:{0}], unable to attach it to cluster", + "zh_CN": "不同的主机中存在不同类型的 lun[serial:{0}],无法将其挂载到集群中", + "arguments": [ + "serial" + ], + "fileName": "src/main/java/org/zstack/storage/device/StorageDeviceManagerImpl.java" + }, { "raw": "specified scsi lun[wwid: %s] not exists or disabled", "en_US": "specified scsi lun[wwid: {0}] not exists or disabled", diff --git a/conf/i18n_json/i18n_storage.json b/conf/i18n_json/i18n_storage.json index f72ad0b8b2d..e1ac06ed022 100644 --- a/conf/i18n_json/i18n_storage.json +++ b/conf/i18n_json/i18n_storage.json @@ -68,6 +68,23 @@ ], "fileName": 
"src/main/java/org/zstack/storage/addon/primary/ExternalPrimaryStorageFactory.java" }, + { + "raw": "not support protocol[%s] on type[%s] primary storage", + "en_US": "not support protocol[{0}] on type[{1}] primary storage", + "zh_CN": "", + "arguments": [ + "protocol", + "getPrimaryStorageType()" + ], + "fileName": "src/main/java/org/zstack/storage/addon/primary/ExternalPrimaryStorageFactory.java" + }, + { + "raw": "not support take volumes snapshots on multiple ps when including storage snapshot", + "en_US": "not support take volumes snapshots on multiple ps when including storage snapshot", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/addon/primary/ExternalPrimaryStorageFactory.java" + }, { "raw": "cannot find ExternalPrimaryStorage[uuid:%s]", "en_US": "cannot find ExternalPrimaryStorage[uuid:{0}]", @@ -152,6 +169,29 @@ ], "fileName": "src/main/java/org/zstack/storage/backup/BackupStorageBase.java" }, + { + "raw": "the image size get from url %s is %d bytes, it\u0027s too small for an image, please check the url again.", + "en_US": "the image size get from url {0} is {1} bytes, it\u0027s too small for an image, please check the url again.", + "zh_CN": "", + "arguments": [ + "url", + "size" + ], + "fileName": "src/main/java/org/zstack/storage/backup/BackupStorageBase.java" + }, + { + "raw": "the backup storage[uuid:%s, name:%s] has not enough capacity to download the image[%s].Required size:%s, available size:%s", + "en_US": "the backup storage[uuid:{0}, name:{1}] has not enough capacity to download the image[{2}].Required size:{3}, available size:{4}", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "self.getName()", + "url", + "size", + "available" + ], + "fileName": "src/main/java/org/zstack/storage/backup/BackupStorageBase.java" + }, { "raw": "backup storage cannot proceed message[%s] because its status is %s", "en_US": "backup storage cannot proceed message[{0}] because its status is {1}", @@ -235,6 +275,16 @@ 
"arguments": [], "fileName": "src/main/java/org/zstack/storage/backup/DefaultBackupStorageAllocatorStrategy.java" }, + { + "raw": "outputProtocol[%s] is exist on primary storage[%s]no need to add again", + "en_US": "outputProtocol[{0}] is exist on primary storage[{1}]no need to add again", + "zh_CN": "", + "arguments": [ + "msg.getOutputProtocol()", + "msg.getPrimaryStorageUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageApiInterceptor.java" + }, { "raw": "unknown primary storage type[%s]", "en_US": "unknown primary storage type[{0}]", @@ -297,6 +347,24 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageApiInterceptor.java" }, + { + "raw": "primary storage(s) [uuid: %s] where volume(s) locate is not Enabled or Connected", + "en_US": "primary storage(s) [uuid: {0}] where volume(s) locate is not Enabled or Connected", + "zh_CN": "", + "arguments": [ + "psUuids" + ], + "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageApiInterceptor.java" + }, + { + "raw": "after removing primary storage%s to avoid, there is no candidate primary storage anymore. please check primary storage status and state in the cluster.", + "en_US": "after removing primary storage{0} to avoid, there is no candidate primary storage anymore. 
please check primary storage status and state in the cluster.", + "zh_CN": "", + "arguments": [ + "spec.getAvoidPrimaryStorageUuids()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageAvoidAllocatorFlow.java" + }, { "raw": "cannot attach ISO to a primary storage[uuid:%s] which is disabled", "en_US": "cannot attach ISO to a primary storage[uuid:{0}] which is disabled", @@ -343,6 +411,16 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageBase.java" }, + { + "raw": "primary storage[uuid:%s] cannot be deleted for still being attached to cluster[uuid:%s].", + "en_US": "primary storage[uuid:{0}] cannot be deleted for still being attached to cluster[uuid:{1}].", + "zh_CN": "", + "arguments": [ + "self.getUuid()", + "clusterUuidsString" + ], + "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageBase.java" + }, { "raw": "cannot attach volume[uuid:%s] whose primary storage is Maintenance", "en_US": "cannot attach volume[uuid:{0}] whose primary storage is Maintenance", @@ -352,6 +430,26 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageBase.java" }, + { + "raw": "cannot reserve %s bytes on the primary storage[uuid:%s], it\u0027s short of available capacity", + "en_US": "cannot reserve {0} bytes on the primary storage[uuid:{1}], it\u0027s short of available capacity", + "zh_CN": "", + "arguments": [ + "size", + "capacityVO.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageCapacityUpdater.java" + }, + { + "raw": "the primary storage[uuid:%s] is not in status of Connected, current status is %s", + "en_US": "the primary storage[uuid:{0}] is not in status of Connected, current status is {1}", + "zh_CN": "", + "arguments": [ + "ps.getUuid()", + "ps.getStatus().toString()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageDeleteBitGC.java" + }, { "raw": "PrimaryStorageFeatureAllocatorFlow[%s] returns zero primary storage 
candidate", "en_US": "PrimaryStorageFeatureAllocatorFlow[{0}] returns zero primary storage candidate", @@ -361,6 +459,18 @@ ], "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageFeatureAllocatorFlow.java" }, + { + "raw": "cannot find primary storage satisfying conditions[connected to host:%s, state:%s, status: %s, available capacity \u003e %s", + "en_US": "cannot find primary storage satisfying conditions[connected to host:{0}, state:{1}, status: {2}, available capacity \u003e {3}", + "zh_CN": "", + "arguments": [ + "spec.getRequiredHostUuid()", + "PrimaryStorageState.Enabled", + "PrimaryStorageStatus.Connected", + "spec.getSize()" + ], + "fileName": "src/main/java/org/zstack/storage/primary/PrimaryStorageMainAllocatorFlow.java" + }, { "raw": "no way to get image size of %s, report exception.", "en_US": "no way to get image size of {0}, report exception.", @@ -533,6 +643,29 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/snapshot/DeleteVolumeSnapshotLongJob.java" }, + { + "raw": "failed to cancel deletion job. Volume[uuid:%s] not attached to any vm, offline snapshot deletion do not support cancel.", + "en_US": "failed to cancel deletion job. Volume[uuid:{0}] not attached to any vm, offline snapshot deletion do not support cancel.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/snapshot/DeleteVolumeSnapshotLongJob.java" + }, + { + "raw": "failed to cancel deletion job. Volume[uuid:%s] attached vm not exists, offline snapshot deletion do not support cancel.", + "en_US": "failed to cancel deletion job. Volume[uuid:{0}] attached vm not exists, offline snapshot deletion do not support cancel.", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/snapshot/DeleteVolumeSnapshotLongJob.java" + }, + { + "raw": "failed to cancel deletion job. 
Volume[uuid:%s] attached vm not in state %s offline snapshot deletion do not support cancel.", + "en_US": "failed to cancel deletion job. Volume[uuid:{0}] attached vm not in state {1} offline snapshot deletion do not support cancel.", + "zh_CN": "", + "arguments": [ + "VmInstanceState.Running" + ], + "fileName": "src/main/java/org/zstack/storage/snapshot/DeleteVolumeSnapshotLongJob.java" + }, { "raw": "volume snapshot[uuids:%s] is in state Disabled, cannot revert volume to it", "en_US": "volume snapshot[uuids:{0}] is in state Disabled, cannot revert volume to it", @@ -637,6 +770,16 @@ ], "fileName": "src/main/java/org/zstack/storage/snapshot/VolumeSnapshotManagerImpl.java" }, + { + "raw": "primary storage[uuid:%s] doesn\u0027t support volume snapshot; cannot create snapshot for volume[uuid:%s]", + "en_US": "primary storage[uuid:{0}] doesn\u0027t support volume snapshot; cannot create snapshot for volume[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "primaryStorageUuid", + "vol.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/snapshot/VolumeSnapshotManagerImpl.java" + }, { "raw": "cannot find snapshot: %s", "en_US": "cannot find snapshot: {0}", @@ -696,6 +839,16 @@ ], "fileName": "src/main/java/org/zstack/storage/snapshot/VolumeSnapshotTreeBase.java" }, + { + "raw": "vm[uuid:%s] is not Running, Paused or Destroyed, Stopped, Destroying, current state[%s]", + "en_US": "vm[uuid:{0}] is not Running, Paused or Destroyed, Stopped, Destroying, current state[{1}]", + "zh_CN": "", + "arguments": [ + "volume.getVmInstanceUuid()", + "vmState" + ], + "fileName": "src/main/java/org/zstack/storage/snapshot/VolumeSnapshotTreeBase.java" + }, { "raw": "failed to change status of volume snapshot[uuid:%s, name:%s] by status event[%s]", "en_US": "failed to change status of volume snapshot[uuid:{0}, name:{1}] by status event[{2}]", @@ -707,6 +860,18 @@ ], "fileName": "src/main/java/org/zstack/storage/snapshot/VolumeSnapshotTreeBase.java" }, + { + "raw": "unable to reset 
volume[uuid:%s] to snapshot[uuid:%s], the vm[uuid:%s] volume attached to is not in Stopped state, current state is %s", + "en_US": "unable to reset volume[uuid:{0}] to snapshot[uuid:{1}], the vm[uuid:{2}] volume attached to is not in Stopped state, current state is {3}", + "zh_CN": "", + "arguments": [ + "volumeInventory.getUuid()", + "currentRoot.getUuid()", + "vmUuid", + "state" + ], + "fileName": "src/main/java/org/zstack/storage/snapshot/VolumeSnapshotTreeBase.java" + }, { "raw": "snapshot(s) %s in the group has been deleted, can only revert one by one.", "en_US": "snapshot(s) {0} in the group has been deleted, can only revert one by one.", @@ -716,6 +881,34 @@ ], "fileName": "src/main/java/org/zstack/storage/snapshot/group/VolumeSnapshotGroupChecker.java" }, + { + "raw": "volume(s) %s is no longer attached, can only revert one by one. If you need to group revert, please re-attach it.", + "en_US": "volume(s) {0} is no longer attached, can only revert one by one. If you need to group revert, please re-attach it.", + "zh_CN": "", + "arguments": [ + "String.join(\", \", detachedVolInfos)" + ], + "fileName": "src/main/java/org/zstack/storage/snapshot/group/VolumeSnapshotGroupChecker.java" + }, + { + "raw": "new volume(s) %s attached after snapshot point, can only revert one by one. If you need to group revert, please detach it.", + "en_US": "new volume(s) {0} attached after snapshot point, can only revert one by one. 
If you need to group revert, please detach it.", + "zh_CN": "", + "arguments": [ + "volInfos" + ], + "fileName": "src/main/java/org/zstack/storage/snapshot/group/VolumeSnapshotGroupChecker.java" + }, + { + "raw": " volume[uuid: %s] has been referenced by other volumes [%s], can not change install path before flatten them and their descendants ", + "en_US": " volume[uuid: {0}] has been referenced by other volumes [{1}], can not change install path before flatten them and their descendants ", + "zh_CN": "", + "arguments": [ + "volumeUuid", + "infos.toString()" + ], + "fileName": "src/main/java/org/zstack/storage/snapshot/reference/VolumeSnapshotReferenceUtils.java" + }, { "raw": "current volume state[%s] doesn\u0027t allow to proceed message[%s]", "en_US": "current volume state[{0}] doesn\u0027t allow to proceed message[{1}]", @@ -743,22 +936,22 @@ "fileName": "src/main/java/org/zstack/storage/volume/DownloadIsoForVmExtension.java" }, { - "raw": "volume[uuid:%s] is not in status Ready, current is %s, can\u0027t create snapshot", - "en_US": "volume[uuid:{0}] is not in status Ready, current is {1}, can\u0027t create snapshot", - "zh_CN": "硬盘[uuid:{0}]未出于就绪状态,当前是{1},不能创建快照", + "raw": "volume[uuid:%s, type:%s], can\u0027t create snapshot", + "en_US": "volume[uuid:{0}, type:{1}], can\u0027t create snapshot", + "zh_CN": "卷[uuid:{0},类型:{1}],无法创建快照", "arguments": [ "msg.getVolumeUuid()", - "status" + "type" ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, { - "raw": "volume[uuid:%s, type:%s], can\u0027t create snapshot", - "en_US": "volume[uuid:{0}, type:{1}], can\u0027t create snapshot", - "zh_CN": "卷[uuid:{0},类型:{1}],无法创建快照", + "raw": "volume[uuid:%s] is not in state Enabled, current is %s, can\u0027t create snapshot", + "en_US": "volume[uuid:{0}] is not in state Enabled, current is {1}, can\u0027t create snapshot", + "zh_CN": "", "arguments": [ "msg.getVolumeUuid()", - "type" + "state" ], "fileName": 
"src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, @@ -782,6 +975,16 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, + { + "raw": "volume[uuid:%s] is not in status Ready, current is %s, can\u0027t create snapshot", + "en_US": "volume[uuid:{0}] is not in status Ready, current is {1}, can\u0027t create snapshot", + "zh_CN": "硬盘[uuid:{0}]未处于就绪状态,当前是{1},不能创建快照", + "arguments": [ + "vol.getUuid()", + "vol.getStatus()" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" + }, { "raw": "the volume[uuid:%s] is not in status of deleted. This is operation is to recover a deleted data volume", "en_US": "the volume[uuid:{0}] is not in status of deleted. This is operation is to recover a deleted data volume", @@ -958,6 +1161,18 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, + { + "raw": "data volume[uuid:%s] has format[%s] that can only be attached to hypervisor[%s], but vm has hypervisor type[%s]. Can\u0027t attach", + "en_US": "data volume[uuid:{0}] has format[{1}] that can only be attached to hypervisor[{2}], but vm has hypervisor type[{3}]. 
Can\u0027t attach", + "zh_CN": "", + "arguments": [ + "volumeVO.getUuid()", + "volumeVO.getFormat()", + "hvTypes", + "hvType" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" + }, { "raw": "Can\u0027t attach volume to VM, no qualified cluster", "en_US": "Can\u0027t attach volume to VM, no qualified cluster", @@ -1010,6 +1225,16 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, + { + "raw": "can not delete volume[%s], because volume attach to host[%s]", + "en_US": "can not delete volume[{0}], because volume attach to host[{1}]", + "zh_CN": "", + "arguments": [ + "msg.getVolumeUuid()", + "hostUuid" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" + }, { "raw": "it\u0027s not allowed to change state of root volume, uuid:%s", "en_US": "it\u0027s not allowed to change state of root volume, uuid:{0}", @@ -1019,6 +1244,16 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, + { + "raw": "can not change volume[%s] state, because volume attach to host[%s]", + "en_US": "can not change volume[{0}] state, because volume attach to host[{1}]", + "zh_CN": "", + "arguments": [ + "msg.getVolumeUuid()", + "hostUuid" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" + }, { "raw": "can not attach volume[%s] to host[%s], because host[status:%s] is not connected", "en_US": "can not attach volume[{0}] to host[{1}], because host[status:{2}] is not connected", @@ -1072,6 +1307,15 @@ ], "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" }, + { + "raw": "can not detach volume[%s] from host. it may have been detached", + "en_US": "can not detach volume[{0}] from host. 
it may have been detached", + "zh_CN": "", + "arguments": [ + "msg.getVolumeUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/volume/VolumeApiInterceptor.java" + }, { "raw": "cannot flatten a shareable volume[uuid:%s]", "en_US": "cannot flatten a shareable volume[uuid:{0}]", diff --git a/conf/i18n_json/i18n_tag2.json b/conf/i18n_json/i18n_tag2.json index a8785b7f175..1689fe2e3ba 100644 --- a/conf/i18n_json/i18n_tag2.json +++ b/conf/i18n_json/i18n_tag2.json @@ -38,6 +38,15 @@ ], "fileName": "src/main/java/org/zstack/tag2/Tag2ApiInterceptor.java" }, + { + "raw": "Get format[%s], format must like that name::{tokenName1}::{tokenName2} ... ::{tokenNameN} or {tokenName1}::{tokenName2} ... ::{tokenNameN} Name cannot contain \u0027{}:\u0027", + "en_US": "Get format[{0}], format must like that name::{tokenName1}::{tokenName2} ... ::{tokenNameN} or {tokenName1}::{tokenName2} ... ::{tokenNameN} Name cannot contain \u0027{}:\u0027", + "zh_CN": "获取格式[{0}],格式必须类似于名称::{tokenName1}::{tokenName2} ... ::{tokenNameN} 或 {tokenName1}::{tokenName2} ... 
::{tokenNameN} 名称不能包含 \u0027{}:\u0027", + "arguments": [ + "format" + ], + "fileName": "src/main/java/org/zstack/tag2/Tag2ApiInterceptor.java" + }, { "raw": "all tokens %s must be specify", "en_US": "all tokens {0} must be specify", diff --git a/conf/i18n_json/i18n_ticket.json b/conf/i18n_json/i18n_ticket.json index f5f7e32332b..33e11812b37 100644 --- a/conf/i18n_json/i18n_ticket.json +++ b/conf/i18n_json/i18n_ticket.json @@ -35,6 +35,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/ticket/api/TicketApiInterceptor.java" }, + { + "raw": "no matched ticket flow collection or no default ticket flow collection found, you must specify the flowCollectionUuid or create a default ticket flow collection in system", + "en_US": "no matched ticket flow collection or no default ticket flow collection found, you must specify the flowCollectionUuid or create a default ticket flow collection in system", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/ticket/api/TicketApiInterceptor.java" + }, { "raw": "Ticket flow collection[uuid:%s] not matches ticket type[uuid:%s]", "en_US": "Ticket flow collection[uuid:{0}] not matches ticket type[uuid:{1}]", diff --git a/conf/i18n_json/i18n_virtualRouterProvider.json b/conf/i18n_json/i18n_virtualRouterProvider.json index 43f1717d69f..e99816ef33b 100644 --- a/conf/i18n_json/i18n_virtualRouterProvider.json +++ b/conf/i18n_json/i18n_virtualRouterProvider.json @@ -1,4 +1,15 @@ [ + { + "raw": "the virtual router[name:%s, uuid:%s, current state:%s] is not running,and cannot perform required operation. Please retry your operation later once it is running", + "en_US": "the virtual router[name:{0}, uuid:{1}, current state:{2}] is not running,and cannot perform required operation. 
Please retry your operation later once it is running", + "zh_CN": "", + "arguments": [ + "self.getName()", + "self.getUuid()", + "self.getState()" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/VirtualRouter.java" + }, { "raw": "virtual router[uuid:%s] is in status of %s that cannot make http call to %s", "en_US": "virtual router[uuid:{0}] is in status of {1} that cannot make http call to {2}", @@ -270,6 +281,34 @@ ], "fileName": "src/main/java/org/zstack/network/service/virtualrouter/VirtualRouterManagerImpl.java" }, + { + "raw": "cannot add ip range, because l3 network[uuid:%s] is management network of virtual router offering", + "en_US": "cannot add ip range, because l3 network[uuid:{0}] is management network of virtual router offering", + "zh_CN": "", + "arguments": [ + "l3NetworkUuid" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/VirtualRouterManagerImpl.java" + }, + { + "raw": "cannot add ip range, because l3 network[uuid:%s] is management network of virtual router", + "en_US": "cannot add ip range, because l3 network[uuid:{0}] is management network of virtual router", + "zh_CN": "", + "arguments": [ + "l3NetworkUuid" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/VirtualRouterManagerImpl.java" + }, + { + "raw": "couldn\u0027t add image, because systemTag [%s] includes invalid appliance image type [%s]", + "en_US": "couldn\u0027t add image, because systemTag [{0}] includes invalid appliance image type [{1}]", + "zh_CN": "", + "arguments": [ + "tag", + "type" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/VirtualRouterManagerImpl.java" + }, { "raw": "failed tot attach virtual router network services to l3Network[uuid:%s]. When eip is selected, snat must be selected too", "en_US": "failed tot attach virtual router network services to l3Network[uuid:{0}]. 
When eip is selected, snat must be selected too", @@ -377,6 +416,19 @@ ], "fileName": "src/main/java/org/zstack/network/service/virtualrouter/eip/VirtualRouterEipBackend.java" }, + { + "raw": "found a virtual router offering[uuid:%s] for L3Network[uuid:%s] in zone[uuid:%s]; however, the network\u0027s public network[uuid:%s] is not the same to EIP[uuid:%s]\u0027s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the L3Network", + "en_US": "found a virtual router offering[uuid:{0}] for L3Network[uuid:{1}] in zone[uuid:{2}]; however, the network\u0027s public network[uuid:{3}] is not the same to EIP[uuid:{4}]\u0027s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the L3Network", + "zh_CN": "", + "arguments": [ + "offering.getUuid()", + "l3inv.getUuid()", + "l3inv.getZoneUuid()", + "struct.getVip().getL3NetworkUuid()", + "struct.getEip().getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/eip/VirtualRouterEipBackend.java" + }, { "raw": "failed to remove eip[uuid:%s, name:%s, ip:%s] for vm nic[uuid:%s] on virtual router[uuid:%s], %s", "en_US": "failed to remove eip[uuid:{0}, name:{1}, ip:{2}] for vm nic[uuid:{3}] on virtual router[uuid:{4}], {5}", @@ -408,6 +460,28 @@ "arguments": [], "fileName": "src/main/java/org/zstack/network/service/virtualrouter/ha/VirtualRouterHaBackendImpl.java" }, + { + "raw": "new add vm nics[uuids:%s] and attached vmnics are not on the same vrouter, they are on vrouters[uuids:%s]", + "en_US": "new add vm nics[uuids:{0}] and attached vmnics are not on the same vrouter, they are on vrouters[uuids:{1}]", + "zh_CN": "", + "arguments": [ + "msg.getVmNicUuids()", + "vrUuids" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/lb/VirtualRouterLoadBalancerBackend.java" + }, + { + "raw": "new add vm nics[uuids:%s] and peer l3s[uuids:%s] of loadbalancer[uuid: 
%s]\u0027s vip are not on the same vrouter, they are on vrouters[uuids:%s]", + "en_US": "new add vm nics[uuids:{0}] and peer l3s[uuids:{1}] of loadbalancer[uuid: {2}]\u0027s vip are not on the same vrouter, they are on vrouters[uuids:{3}]", + "zh_CN": "", + "arguments": [ + "msg.getVmNicUuids()", + "peerL3NetworkUuids", + "msg.getLoadBalancerUuid()", + "vrUuids" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/lb/VirtualRouterLoadBalancerBackend.java" + }, { "raw": "vmnic must be specified for share loadbalancer", "en_US": "vmnic must be specified for share loadbalancer", @@ -483,6 +557,31 @@ ], "fileName": "src/main/java/org/zstack/network/service/virtualrouter/portforwarding/ReleasePortForwardingRuleOnVirtualRouterVmFlow.java" }, + { + "raw": "found a virtual router offering[uuid:%s] for L3Network[uuid:%s] in zone[uuid:%s]; however, the network\u0027s public network[uuid:%s] is not the same to PortForwarding rule[uuid:%s]\u0027s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the L3Network", + "en_US": "found a virtual router offering[uuid:{0}] for L3Network[uuid:{1}] in zone[uuid:{2}]; however, the network\u0027s public network[uuid:{3}] is not the same to PortForwarding rule[uuid:{4}]\u0027s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the L3Network", + "zh_CN": "", + "arguments": [ + "offering.getUuid()", + "struct.getGuestL3Network().getUuid()", + "struct.getGuestL3Network().getZoneUuid()", + "struct.getVip().getL3NetworkUuid()", + "struct.getRule().getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/portforwarding/VirtualRouterPortForwardingBackend.java" + }, + { + "raw": "virtual router doesn\u0027t support port forwarding range redirection, the vipPortStart must be equals to privatePortStart and vipPortEnd must be equals to privatePortEnd;but this rule rule has a 
mismatching range: vip port[%s, %s], private port[%s, %s]", + "en_US": "virtual router doesn\u0027t support port forwarding range redirection, the vipPortStart must be equals to privatePortStart and vipPortEnd must be equals to privatePortEnd;but this rule rule has a mismatching range: vip port[{0}, {1}], private port[{2}, {3}]", + "zh_CN": "", + "arguments": [ + "rule.getVipPortStart()", + "rule.getVipPortEnd()", + "rule.getPrivatePortStart()", + "rule.getPrivatePortEnd()" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/portforwarding/VirtualRouterPortForwardingBackend.java" + }, { "raw": "failed to add portforwardings on virtual router[uuid:%s], %s", "en_US": "failed to add portforwardings on virtual router[uuid:{0}], {1}", @@ -514,6 +613,19 @@ ], "fileName": "src/main/java/org/zstack/network/service/virtualrouter/portforwarding/VirtualRouterSyncPortForwardingRulesOnStartFlow.java" }, + { + "raw": "failed to sync vips[ips: %s] on virtual router[uuid:%s] for attaching nic[uuid: %s, ip: %s], because %s", + "en_US": "failed to sync vips[ips: {0}] on virtual router[uuid:{1}] for attaching nic[uuid: {2}, ip: {3}], because {4}", + "zh_CN": "", + "arguments": [ + "vips.stream().map(VipTO::getIp).collect(Collectors.toList())", + "nic.getVmInstanceUuid()", + "nic.getUuid()", + "nic.getIp()", + "ret.getError()" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/vip/VirtualRouterVipBackend.java" + }, { "raw": "failed to remove vip%s, because %s", "en_US": "failed to remove vip{0}, because {1}", @@ -545,6 +657,19 @@ ], "fileName": "src/main/java/org/zstack/network/service/virtualrouter/vip/VirtualRouterVipBaseBackend.java" }, + { + "raw": "found a virtual router offering[uuid:%s] for L3Network[uuid:%s] in zone[uuid:%s]; however, the network\u0027s public network[uuid:%s] is not the same to VIP[uuid:%s]\u0027s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the 
L3Network", + "en_US": "found a virtual router offering[uuid:{0}] for L3Network[uuid:{1}] in zone[uuid:{2}]; however, the network\u0027s public network[uuid:{3}] is not the same to VIP[uuid:{4}]\u0027s; you may need to use system tag guestL3Network::l3NetworkUuid to specify a particular virtual router offering for the L3Network", + "zh_CN": "", + "arguments": [ + "offering.getUuid()", + "s.getL3Network().getUuid()", + "s.getL3Network().getZoneUuid()", + "self.getL3NetworkUuid()", + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/vip/VirtualRouterVipBaseBackend.java" + }, { "raw": "failed to change nic[ip:%s, mac:%s] firewall default action of virtual router vm[uuid:%s], because %s", "en_US": "failed to change nic[ip:{0}, mac:{1}] firewall default action of virtual router vm[uuid:{2}], because {3}", @@ -557,6 +682,16 @@ ], "fileName": "src/main/java/org/zstack/network/service/virtualrouter/vyos/VyosChangePrivateL3FirewallDefaultActionExtensionPoint.java" }, + { + "raw": "the SSH port is not open after %s seconds. Failed to login the virtual router[ip:%s]", + "en_US": "the SSH port is not open after {0} seconds. 
Failed to login the virtual router[ip:{1}]", + "zh_CN": "", + "arguments": [ + "timeoutInSeconds", + "mgmtNicIp" + ], + "fileName": "src/main/java/org/zstack/network/service/virtualrouter/vyos/VyosConfigSshFlow.java" + }, { "raw": "unable to ssh in to the virtual router[%s] after configure ssh", "en_US": "unable to ssh in to the virtual router[{0}] after configure ssh", diff --git a/conf/i18n_json/i18n_virtualSwitchNetwork.json b/conf/i18n_json/i18n_virtualSwitchNetwork.json index 74ce3b12964..03dbb71392c 100644 --- a/conf/i18n_json/i18n_virtualSwitchNetwork.json +++ b/conf/i18n_json/i18n_virtualSwitchNetwork.json @@ -85,6 +85,16 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/L2VirtualSwitchNetwork.java" }, + { + "raw": "an unexpected error caused the bonding to not be created on host[uuid:%s] for virtual switch[uuid:%s]", + "en_US": "an unexpected error caused the bonding to not be created on host[uuid:{0}] for virtual switch[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "msg.getHostUuid()", + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/L2VirtualSwitchNetwork.java" + }, { "raw": "interface[uuid:%s] is not found on host[uuid:%s] for virtual switch[uuid:%s]", "en_US": "interface[uuid:{0}] is not found on host[uuid:{1}] for virtual switch[uuid:{2}]", @@ -107,6 +117,25 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/PortGroupFactory.java" }, + { + "raw": "the default virtual switch network[uuid:%s] cannot be deleted when it is still attached to hosts", + "en_US": "the default virtual switch network[uuid:{0}] cannot be deleted when it is still attached to hosts", + "zh_CN": "", + "arguments": [ + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not delete virtual switch network[uuid:%s],because host kernel interface[uuid:%s] still exists on the virtual switch and its host status is not 
connected", + "en_US": "could not delete virtual switch network[uuid:{0}],because host kernel interface[uuid:{1}] still exists on the virtual switch and its host status is not connected", + "zh_CN": "", + "arguments": [ + "msg.getUuid()", + "interfaceUuids.get(0)" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "could not delete l2 network[uuid:%s] with default port group", "en_US": "could not delete l2 network[uuid:{0}] with default port group", @@ -116,6 +145,116 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "could not delete l2 port group network[uuid:%s],because host kernel interface[%s] still exists on the port group and its host status is not connected", + "en_US": "could not delete l2 port group network[uuid:{0}],because host kernel interface[{1}] still exists on the port group and its host status is not connected", + "zh_CN": "", + "arguments": [ + "msg.getUuid()", + "interfaceUuids.get(0)" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "cannot delete default port group[uuid:%s], because there are host kernel interfaces still exist on hosts[uuid:%s]", + "en_US": "cannot delete default port group[uuid:{0}], because there are host kernel interfaces still exist on hosts[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "msg.getUuid()", + "hostUuidsWithDefaultKernel" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not delete port group[uuid:%s], because host kernel interface[uuid:%s] still exists on the port group and its host status is not connected", + "en_US": "could not delete port group[uuid:{0}], because host kernel interface[uuid:{1}] still exists on the port group and its host status is not connected", + "zh_CN": "", + "arguments": [ + "msg.getUuid()", + 
"interfaceUuids.get(0)" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not create host kernel interface, because requiredIp cannot be null with l3Network[uuid:%s] disable IPAM", + "en_US": "could not create host kernel interface, because requiredIp cannot be null with l3Network[uuid:{0}] disable IPAM", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not batch create host kernel interface, because hostUuid in struct should be set", + "en_US": "could not batch create host kernel interface, because hostUuid in struct should be set", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not create host kernel interface, because host[uuid:%s] not found", + "en_US": "could not create host kernel interface, because host[uuid:{0}] not found", + "zh_CN": "", + "arguments": [ + "struct.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not create host kernel interface for host[uuid:%s], because name should be set", + "en_US": "could not create host kernel interface for host[uuid:{0}], because name should be set", + "zh_CN": "", + "arguments": [ + "struct.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not batch create host kernel interface, because ip cannot be null with l3Network[uuid:%s] disable IPAM", + "en_US": "could not batch create host kernel interface, because ip cannot be null with l3Network[uuid:{0}] disable IPAM", + "zh_CN": "", + "arguments": [ + "msg.getL3NetworkUuid()" + ], + "fileName": 
"src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not batch create host kernel interface, because duplicate ipv4 address[%s] in input structs", + "en_US": "could not batch create host kernel interface, because duplicate ipv4 address[{0}] in input structs", + "zh_CN": "", + "arguments": [ + "struct.getIp()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not batch create host kernel interface, because duplicate ipv6 address[%s] in input structs", + "en_US": "could not batch create host kernel interface, because duplicate ipv6 address[{0}] in input structs", + "zh_CN": "", + "arguments": [ + "struct.getIp6()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not update host kernel interface[uuid:%s], because netmask cannot be set without requiredIp", + "en_US": "could not update host kernel interface[uuid:{0}], because netmask cannot be set without requiredIp", + "zh_CN": "", + "arguments": [ + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not update host kernel interface[uuid:%s], because host[uuid:%s] is not connected", + "en_US": "could not update host kernel interface[uuid:{0}], because host[uuid:{1}] is not connected", + "zh_CN": "", + "arguments": [ + "msg.getUuid()", + "host.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "could not delete default host kernel interface[uuid:%s]", "en_US": "could not delete default host kernel interface[uuid:{0}]", @@ -125,6 +264,16 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "could not delete host kernel interface[uuid:%s], because host[uuid:%s] is not 
connected", + "en_US": "could not delete host kernel interface[uuid:{0}], because host[uuid:{1}] is not connected", + "zh_CN": "", + "arguments": [ + "msg.getUuid()", + "host.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "invalid CIDR: %s", "en_US": "invalid CIDR: {0}", @@ -153,6 +302,13 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "physicalInterface should not be null when uplink bonding is set", + "en_US": "physicalInterface should not be null when uplink bonding is set", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "only one systemTag for uplink bonding is allowed", "en_US": "only one systemTag for uplink bonding is allowed", @@ -188,6 +344,26 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "could not create L2PortGroupNetwork, because L2VirtualSwitchNetwork[uuid:%s] already has L2PortGroupNetworks with the same vlanId[%s]", + "en_US": "could not create L2PortGroupNetwork, because L2VirtualSwitchNetwork[uuid:{0}] already has L2PortGroupNetworks with the same vlanId[{1}]", + "zh_CN": "", + "arguments": [ + "msg.getvSwitchUuid()", + "msg.getVlan()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not attach L2PortGroupNetwork[uuid:%s] to cluster[uuid:%s], which L2VirtualSwitchNetwork should be used", + "en_US": "could not attach L2PortGroupNetwork[uuid:{0}] to cluster[uuid:{1}], which L2VirtualSwitchNetwork should be used", + "zh_CN": "", + "arguments": [ + "vo.getUuid()", + "msg.getClusterUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "could not attach L2Network to KVM cluster, 
because the l2Network[uuid:%s] is default vSwitch", "en_US": "could not attach L2Network to KVM cluster, because the l2Network[uuid:{0}] is default vSwitch", @@ -197,6 +373,68 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "could not attach L2VirtualSwitchNetwork, because interface[%s] in cluster[uuid:%s] is already used for another L2VirtualSwitchNetwork", + "en_US": "could not attach L2VirtualSwitchNetwork, because interface[{0}] in cluster[uuid:{1}] is already used for another L2VirtualSwitchNetwork", + "zh_CN": "", + "arguments": [ + "vo.getPhysicalInterface()", + "msg.getClusterUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not attach L2PortGroupNetwork[uuid:%s] to host[uuid:%s], which L2VirtualSwitchNetwork should be used", + "en_US": "could not attach L2PortGroupNetwork[uuid:{0}] to host[uuid:{1}], which L2VirtualSwitchNetwork should be used", + "zh_CN": "", + "arguments": [ + "vo.getUuid()", + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not attach L2VirtualSwitchNetwork[uuid:%s] to host[uuid:%s], because the physical interface[%s] is invalid", + "en_US": "could not attach L2VirtualSwitchNetwork[uuid:{0}] to host[uuid:{1}], because the physical interface[{2}] is invalid", + "zh_CN": "", + "arguments": [ + "vo.getUuid()", + "msg.getHostUuid()", + "hostParam.getPhysicalInterface()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not attach L2VirtualSwitchNetwork[uuid:%s] to host[uuid:%s], because the pass-through state of physical interface[%s] is [Enabled]", + "en_US": "could not attach L2VirtualSwitchNetwork[uuid:{0}] to host[uuid:{1}], because the pass-through state of physical interface[{2}] is [Enabled]", + 
"zh_CN": "", + "arguments": [ + "vo.getUuid()", + "msg.getHostUuid()", + "hostParam.getPhysicalInterface()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not attach L2VirtualSwitchNetwork[uuid:%s] to host[uuid:%s], because there is no uplink configured for the virtual switch on the host", + "en_US": "could not attach L2VirtualSwitchNetwork[uuid:{0}] to host[uuid:{1}], because there is no uplink configured for the virtual switch on the host", + "zh_CN": "", + "arguments": [ + "vo.getUuid()", + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not detach L2PortGroupNetwork[uuid:%s] from cluster[uuid:%s], which L2VirtualSwitchNetwork should be used", + "en_US": "could not detach L2PortGroupNetwork[uuid:{0}] from cluster[uuid:{1}], which L2VirtualSwitchNetwork should be used", + "zh_CN": "", + "arguments": [ + "vo.getUuid()", + "msg.getClusterUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "could not detach L2Network from KVM cluster, because the l2Network[uuid:%s] is default vSwitch", "en_US": "could not detach L2Network from KVM cluster, because the l2Network[uuid:{0}] is default vSwitch", @@ -206,6 +444,16 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "could not detach L2PortGroupNetwork[uuid:%s] from host[uuid:%s], which L2VirtualSwitchNetwork should be used", + "en_US": "could not detach L2PortGroupNetwork[uuid:{0}] from host[uuid:{1}], which L2VirtualSwitchNetwork should be used", + "zh_CN": "", + "arguments": [ + "vo.getUuid()", + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "could not detach L2Network from host, because the l2Network[uuid:%s] 
is default vSwitch", "en_US": "could not detach L2Network from host, because the l2Network[uuid:{0}] is default vSwitch", @@ -215,6 +463,16 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "could not create port group for L2Network[uuid:%s]that does not belong to vSwitch[uuid:%s]", + "en_US": "could not create port group for L2Network[uuid:{0}]that does not belong to vSwitch[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "msg.getL2NetworkUuid()", + "msg.getvSwitchUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "could not create l3 network on virtual switch[uuid:%s]", "en_US": "could not create l3 network on virtual switch[uuid:{0}]", @@ -240,6 +498,59 @@ "arguments": [], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "could not update vlan for port group, because L2VirtualSwitchNetwork[uuid:%s] already has L2PortGroupNetworks with the same vlanId[%s]", + "en_US": "could not update vlan for port group, because L2VirtualSwitchNetwork[uuid:{0}] already has L2PortGroupNetworks with the same vlanId[{1}]", + "zh_CN": "", + "arguments": [ + "vo.getvSwitchUuid()", + "msg.getVirtualNetworkId()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not update uplink bonding of default vSwitch when it is still attached to hosts with uplink bonding exist", + "en_US": "could not update uplink bonding of default vSwitch when it is still attached to hosts with uplink bonding exist", + "zh_CN": "", + "arguments": [ + "vo.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "bondingName cannot be empty when virtual switch has no uplink bonding config", + "en_US": "bondingName cannot be empty when virtual switch has no 
uplink bonding config", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not update uplink bonding name because the version of the virtual switch[uuid:%s] is old", + "en_US": "could not update uplink bonding name because the version of the virtual switch[uuid:{0}] is old", + "zh_CN": "", + "arguments": [ + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not update uplink bonding name when virtual switch has uplink bonding group", + "en_US": "could not update uplink bonding name when virtual switch has uplink bonding group", + "zh_CN": "", + "arguments": [ + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could not update uplink bonding name which has been occupied by another virtual switch attached to the same cluster", + "en_US": "could not update uplink bonding name which has been occupied by another virtual switch attached to the same cluster", + "zh_CN": "", + "arguments": [ + "msg.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "virtual switch[uuid:%s] has not attached to host[uuid:%s]", "en_US": "virtual switch[uuid:{0}] has not attached to host[uuid:{1}]", @@ -280,6 +591,26 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" }, + { + "raw": "cannot update uplink to bonding, because bonding[%s] already exists on host[uuid:%s]", + "en_US": "cannot update uplink to bonding, because bonding[{0}] already exists on host[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "vo.getPhysicalInterface()", + "msg.getHostUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, + { + "raw": "could 
not update mode or xmit_hash_policy of bonding[uuid:%s] which is in use by virtual switch[uuid:%s]", + "en_US": "could not update mode or xmit_hash_policy of bonding[uuid:{0}] which is in use by virtual switch[uuid:{1}]", + "zh_CN": "", + "arguments": [ + "bondingVO.getUuid()", + "vSwitchUuid" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchApiInterceptor.java" + }, { "raw": "could not delete bonding[uuid:%s], because it is in use by virtual switch[uuid:%s]", "en_US": "could not delete bonding[uuid:{0}], because it is in use by virtual switch[uuid:{1}]", @@ -322,6 +653,19 @@ ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchManagerImpl.java" }, + { + "raw": "failed to create default port group, because the bridge name[%s] of managementIp[%s] must be the same as the bridge name[%s] of vlanId[%s] on default virtual switch[%s]", + "en_US": "failed to create default port group, because the bridge name[{0}] of managementIp[{1}] must be the same as the bridge name[{2}] of vlanId[{3}] on default virtual switch[{4}]", + "zh_CN": "", + "arguments": [ + "interfaceTO.getBridgeName()", + "host.getManagementIp()", + "tagBridgeName", + "pvo.getVlanId()", + "vSwitchVO.getUuid()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchManagerImpl.java" + }, { "raw": "failed to get the host interface for the managementIp[%s]", "en_US": "failed to get the host interface for the managementIp[{0}]", @@ -330,5 +674,29 @@ "host.getManagementIp()" ], "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchManagerImpl.java" + }, + { + "raw": "failed to create default kernel interface,because the uplink bonding[name:%s] of managementIp[%s] must be the same as cluster[uuid:%s] default uplink bonding[name:%s]", + "en_US": "failed to create default kernel interface,because the uplink bonding[name:{0}] of managementIp[{1}] must be the same as cluster[uuid:{2}] default uplink 
bonding[name:{3}]", + "zh_CN": "", + "arguments": [ + "bond.getBondingName()", + "host.getManagementIp()", + "host.getClusterUuid()", + "vsvo.getPhysicalInterface()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchManagerImpl.java" + }, + { + "raw": "failed to create default port group, because the vlanId[%s] of managementIp[%s] must be the same as cluster[uuid:%s] default vlanId[%s]", + "en_US": "failed to create default port group, because the vlanId[{0}] of managementIp[{1}] must be the same as cluster[uuid:{2}] default vlanId[{3}]", + "zh_CN": "", + "arguments": [ + "ctx.interfaceTO.getVlanId()", + "host.getManagementIp()", + "host.getClusterUuid()", + "ctx.pvo.getVlanId()" + ], + "fileName": "src/main/java/org/zstack/network/l2/virtualSwitch/VirtualSwitchManagerImpl.java" } ] \ No newline at end of file diff --git a/conf/i18n_json/i18n_volumebackup.json b/conf/i18n_json/i18n_volumebackup.json index b69ef2fa584..6b672e0c089 100644 --- a/conf/i18n_json/i18n_volumebackup.json +++ b/conf/i18n_json/i18n_volumebackup.json @@ -443,6 +443,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/storage/backup/VolumeBackupApiInterceptor.java" }, + { + "raw": "The vm is creating a backup job, cannot enable the cdp task at the same time.", + "en_US": "The vm is creating a backup job, cannot enable the cdp task at the same time.", + "zh_CN": "虚拟机正在创建备份任务,无法同时启用 CDP 任务", + "arguments": [], + "fileName": "src/main/java/org/zstack/storage/backup/VolumeBackupApiInterceptor.java" + }, { "raw": "cannot find volume backup[uuid:%s]", "en_US": "cannot find volume backup[uuid:{0}]", @@ -647,6 +654,16 @@ ], "fileName": "src/main/java/org/zstack/storage/backup/VolumeBackupManagerImpl.java" }, + { + "raw": "generate volume backup metadata file on image store[uuid:%s] failure, because IO error: %s", + "en_US": "generate volume backup metadata file on image store[uuid:{0}] failure, because IO error: {1}", + "zh_CN": "在镜像存储[uuid:{0}] 上生成卷备份元数据文件出现 
IO 错误: {1}", + "arguments": [ + "inv.getUuid()", + "e.getMessage()" + ], + "fileName": "src/main/java/org/zstack/storage/backup/VolumeBackupMetadataMaker.java" + }, { "raw": "volume backup metadata operation failure, because %s", "en_US": "volume backup metadata operation failure, because {0}", diff --git a/conf/i18n_json/i18n_vpc.json b/conf/i18n_json/i18n_vpc.json index d4b0f9477f1..027cb40c173 100644 --- a/conf/i18n_json/i18n_vpc.json +++ b/conf/i18n_json/i18n_vpc.json @@ -87,6 +87,16 @@ ], "fileName": "src/main/java/org/zstack/ipsec/IPsecApiInterceptor.java" }, + { + "raw": "there already have ipsec connection[uuid:%s, name:%s] with the same vrouter and peerAddress", + "en_US": "there already have ipsec connection[uuid:{0}, name:{1}] with the same vrouter and peerAddress", + "zh_CN": "", + "arguments": [ + "tuples.get(0).get(0, String.class)", + "tuples.get(0).get(1, String.class)" + ], + "fileName": "src/main/java/org/zstack/ipsec/IPsecApiInterceptor.java" + }, { "raw": "the vip[uuid:%s] has been used for %s", "en_US": "the vip[uuid:{0}] has been used for {1}", @@ -384,6 +394,28 @@ ], "fileName": "src/main/java/org/zstack/vpc/VpcApiInterceptor.java" }, + { + "raw": "l3 network[uuid:%s] can not detach from vpc vrouter[uuid:%s] since network services attached vips[%s] still used in l3", + "en_US": "l3 network[uuid:{0}] can not detach from vpc vrouter[uuid:{1}] since network services attached vips[{2}] still used in l3", + "zh_CN": "", + "arguments": [ + "l3NetworkVO.getUuid()", + "vmInstanceVO.getUuid()", + "vipPeerVOs.stream().map(VipPeerL3NetworkRefVO::getVipUuid).collect(Collectors.toList())" + ], + "fileName": "src/main/java/org/zstack/vpc/VpcApiInterceptor.java" + }, + { + "raw": "vpc l3 network[uuid:%s] can not detach from vpc vrouter[uuid:%s] since vm nics[%s] still used in l3", + "en_US": "vpc l3 network[uuid:{0}] can not detach from vpc vrouter[uuid:{1}] since vm nics[{2}] still used in l3", + "zh_CN": "", + "arguments": [ + "l3NetworkVO.getUuid()", + 
"vmInstanceVO.getUuid()", + "vmNicVOS.stream().map(ResourceVO::getUuid).collect(Collectors.toList())" + ], + "fileName": "src/main/java/org/zstack/vpc/VpcApiInterceptor.java" + }, { "raw": "virtual router offering[uuid: %s] is not enabled", "en_US": "virtual router offering[uuid: {0}] is not enabled", @@ -467,6 +499,26 @@ ], "fileName": "src/main/java/org/zstack/vpc/VpcApiInterceptor.java" }, + { + "raw": "the static ip[%s] specified in message not equals to gateway ips[%s] of l3 network[uuid:%s]", + "en_US": "the static ip[{0}] specified in message not equals to gateway ips[{1}] of l3 network[uuid:{2}]", + "zh_CN": "", + "arguments": [ + "msg.getStaticIp()", + "gateways", + "l3NetworkVO.getUuid()" + ], + "fileName": "src/main/java/org/zstack/vpc/VpcApiInterceptor.java" + }, + { + "raw": "l3 network [uuid:%s] must be attached first, because there is vip on that l3 network", + "en_US": "l3 network [uuid:{0}] must be attached first, because there is vip on that l3 network", + "zh_CN": "", + "arguments": [ + "vipL3Uuid" + ], + "fileName": "src/main/java/org/zstack/vpc/VpcApiInterceptor.java" + }, { "raw": "dns[%s] is not a IP address", "en_US": "dns[{0}] is not a IP address", @@ -574,6 +626,13 @@ ], "fileName": "src/main/java/org/zstack/vpc/VpcManagerImpl.java" }, + { + "raw": "vpc l3 network must attach a vpc vrouter first before do anything related to vrouter(like start/stop vm, create lb, etc.)", + "en_US": "vpc l3 network must attach a vpc vrouter first before do anything related to vrouter(like start/stop vm, create lb, etc.)", + "zh_CN": "", + "arguments": [], + "fileName": "src/main/java/org/zstack/vpc/VpcManagerImpl.java" + }, { "raw": "dns address [%s] has bean added to vpc router [uuid:%s]", "en_US": "dns address [{0}] has bean added to vpc router [uuid:{1}]", @@ -622,6 +681,16 @@ ], "fileName": "src/main/java/org/zstack/vpc/VpcVyosDeployZsnAgentFlow.java" }, + { + "raw": "the SSH port is not open after %s seconds. 
Failed to login the vpc router[ip:%s]", + "en_US": "the SSH port is not open after {0} seconds. Failed to login the vpc router[ip:{1}]", + "zh_CN": "", + "arguments": [ + "timeoutInSeconds", + "mgmtNicIp" + ], + "fileName": "src/main/java/org/zstack/vpc/VpcVyosDeployZsnAgentFlow.java" + }, { "raw": "Could not update this network service, due to vpc [uuid:%s] is not support update network service version", "en_US": "Could not update this network service, due to vpc [uuid:{0}] is not support update network service version", @@ -712,6 +781,16 @@ ], "fileName": "src/main/java/org/zstack/vpc/ha/VpcHaGroupApiInterceptor.java" }, + { + "raw": "vpc router l3 networks [uuid:%s] are different from ha group l3 networks [uuid:%s], !!! please delete this router and recreate it", + "en_US": "vpc router l3 networks [uuid:{0}] are different from ha group l3 networks [uuid:{1}], !!! please delete this router and recreate it", + "zh_CN": "", + "arguments": [ + "vpcL3Uuids", + "vpcHaGroupL3Uuids" + ], + "fileName": "src/main/java/org/zstack/vpc/ha/VpcHaGroupApiInterceptor.java" + }, { "raw": "vpc router has been attached to ha group [uuid:%s]", "en_US": "vpc router has been attached to ha group [uuid:{0}]", @@ -739,6 +818,15 @@ ], "fileName": "src/main/java/org/zstack/vpc/ha/VpcHaGroupApiInterceptor.java" }, + { + "raw": "vpc router [uuid:%s] can not be upgraded to ha router because it public network is same to management network", + "en_US": "vpc router [uuid:{0}] can not be upgraded to ha router because it public network is same to management network", + "zh_CN": "", + "arguments": [ + "haUuid" + ], + "fileName": "src/main/java/org/zstack/vpc/ha/VpcHaGroupApiInterceptor.java" + }, { "raw": "create affinityGroup for ha group [uuid:%s] failed", "en_US": "create affinityGroup for ha group [uuid:{0}] failed", diff --git a/conf/i18n_json/i18n_zbox.json b/conf/i18n_json/i18n_zbox.json index f72c1bf8e34..9e5d0d32dd2 100644 --- a/conf/i18n_json/i18n_zbox.json +++ 
b/conf/i18n_json/i18n_zbox.json @@ -1,4 +1,14 @@ [ + { + "raw": "usb device[uuid:%s] has been attached VM[uuid:%s], cannot be add to zbox", + "en_US": "usb device[uuid:{0}] has been attached VM[uuid:{1}], cannot be add to zbox", + "zh_CN": "", + "arguments": [ + "msg.getUsbDeviceUuid()", + "inventory.getVmInstanceUuid()" + ], + "fileName": "src/main/java/org/zstack/zbox/ZBoxApiInterceptor.java" + }, { "raw": "zbox[name:%s] status is not Ready, current status is %s", "en_US": "zbox[name:{0}] status is not Ready, current status is {1}", @@ -55,5 +65,15 @@ "zbox.getUuid()" ], "fileName": "src/main/java/org/zstack/zbox/ZBoxFactory.java" + }, + { + "raw": "please attach zbox to %s[uuid:%s] and resume job. if you do not want to continue, cancel it.", + "en_US": "please attach zbox to {0}[uuid:{1}] and resume job. if you do not want to continue, cancel it.", + "zh_CN": "", + "arguments": [ + "type", + "uuid" + ], + "fileName": "src/main/java/org/zstack/zbox/ZBoxUtils.java" } ] \ No newline at end of file diff --git a/conf/i18n_json/i18n_zbs.json b/conf/i18n_json/i18n_zbs.json index 3b718465102..1840a746347 100644 --- a/conf/i18n_json/i18n_zbs.json +++ b/conf/i18n_json/i18n_zbs.json @@ -53,6 +53,15 @@ ], "fileName": "src/main/java/org/zstack/storage/zbs/ZbsStorageController.java" }, + { + "raw": "unable to connect to the ZBS primary storage[uuid:%s], failed to connect all MDS", + "en_US": "unable to connect to the ZBS primary storage[uuid:{0}], failed to connect all MDS", + "zh_CN": "无法连接 ZBS 主存储 [uuid:{0}]: 不能连接所有的 MDS", + "arguments": [ + "self.getUuid()" + ], + "fileName": "src/main/java/org/zstack/storage/zbs/ZbsStorageController.java" + }, { "raw": "ZBS primary storage[uuid:%s] may have been deleted", "en_US": "ZBS primary storage[uuid:{0}] may have been deleted", diff --git a/conf/i18n_json/i18n_zce-x-plugin.json b/conf/i18n_json/i18n_zce-x-plugin.json index 2550ee30263..171dfa1c93a 100644 --- a/conf/i18n_json/i18n_zce-x-plugin.json +++ 
b/conf/i18n_json/i18n_zce-x-plugin.json @@ -139,6 +139,15 @@ ], "fileName": "src/main/java/org/zstack/zcex/ZceXManager.java" }, + { + "raw": "install.sh not found: %s. install.sh has been manually deleted. please re-upload the installation package", + "en_US": "install.sh not found: {0}. install.sh has been manually deleted. please re-upload the installation package", + "zh_CN": "未找到 install.sh: {0}. install.sh 可能被手动删除, 需要重新上传安装包", + "arguments": [ + "installPath" + ], + "fileName": "src/main/java/org/zstack/zcex/ZceXManager.java" + }, { "raw": "failed to install distributed storage", "en_US": "failed to install distributed storage", @@ -355,6 +364,13 @@ "arguments": [], "fileName": "src/main/java/org/zstack/zcex/client/ZceXLocalClient.java" }, + { + "raw": "admin_token already exists. You must confirm that the current admin_token is no longer in use, delete the token by command \u0027xms-cli access-token delete\u0027, and try again", + "en_US": "admin_token already exists. You must confirm that the current admin_token is no longer in use, delete the token by command \u0027xms-cli access-token delete\u0027, and try again", + "zh_CN": "admin_token 已经存在。请确认当前的 admin_token 不再使用,并通过 \u0027xms-cli access-token delete\u0027 删除它,然后重试", + "arguments": [], + "fileName": "src/main/java/org/zstack/zcex/client/ZceXLocalClient.java" + }, { "raw": "failed to login ZCE-X when creating access token", "en_US": "failed to login ZCE-X when creating access token", diff --git a/conf/i18n_json/i18n_zstone-plugin.json b/conf/i18n_json/i18n_zstone-plugin.json index 62c0689b51c..8344e24a95b 100644 --- a/conf/i18n_json/i18n_zstone-plugin.json +++ b/conf/i18n_json/i18n_zstone-plugin.json @@ -101,6 +101,15 @@ "arguments": [], "fileName": "src/main/java/org/zstack/zstone/ZStoneManager.java" }, + { + "raw": "install.sh not found: %s. install.sh has been manually deleted. please re-upload the installation package", + "en_US": "install.sh not found: {0}. install.sh has been manually deleted. 
please re-upload the installation package", + "zh_CN": "未找到 install.sh: {0}. install.sh 可能被手动删除, 需要重新上传安装包", + "arguments": [ + "installPath" + ], + "fileName": "src/main/java/org/zstack/zstone/ZStoneManager.java" + }, { "raw": "failed to install distributed storage", "en_US": "failed to install distributed storage", diff --git a/conf/i18n_json/i18n_zwatch.json b/conf/i18n_json/i18n_zwatch.json index 096258231a2..2ad09d02d09 100644 --- a/conf/i18n_json/i18n_zwatch.json +++ b/conf/i18n_json/i18n_zwatch.json @@ -752,6 +752,16 @@ ], "fileName": "src/main/java/org/zstack/zwatch/function/SortFunction.java" }, + { + "raw": "there are multiple EventFamily with the name[%s], you must specify the label[%s]", + "en_US": "there are multiple EventFamily with the name[{0}], you must specify the label[{1}]", + "zh_CN": "存在多个同名[{0}]的事件族,必须指定标签[{1}]", + "arguments": [ + "name.getValue()", + "InfluxEventDataV2.FIELD_NAMESPACE" + ], + "fileName": "src/main/java/org/zstack/zwatch/migratedb/MigrateDBEventDatabaseDriver.java" + }, { "raw": "invalid query label[%s]. Allowed label names are %s", "en_US": "invalid query label[{0}]. Allowed label names are {1}", From cff74aaa72b5cf5c78e44366d330f4f98425cb5e Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Mon, 26 Jan 2026 14:49:19 +0800 Subject: [PATCH 05/76] [storage]: remove CreateVmInstance.dataDiskOfferingUuids usage update BatchCreateVmFailDeadlockCase, BatchCreateVmFailOnLocalStorageCase, VmOperationMultyTypeStorageCase. 
This patch is for zsv_4.10.28 Related: ZSV-8585 Change-Id: I71617374747574706576627a74646875746d6b68 --- .../BatchCreateVmFailDeadlockCase.groovy | 27 +++++++-------- ...BatchCreateVmFailOnLocalStorageCase.groovy | 27 +++++++-------- .../VmOperationMultyTypeStorageCase.groovy | 33 ++++++++++++------- 3 files changed, 44 insertions(+), 43 deletions(-) diff --git a/test/src/test/groovy/org/zstack/test/integration/db/deadlock/BatchCreateVmFailDeadlockCase.groovy b/test/src/test/groovy/org/zstack/test/integration/db/deadlock/BatchCreateVmFailDeadlockCase.groovy index c3cd5d2f91e..a97fe321980 100644 --- a/test/src/test/groovy/org/zstack/test/integration/db/deadlock/BatchCreateVmFailDeadlockCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/db/deadlock/BatchCreateVmFailDeadlockCase.groovy @@ -51,17 +51,6 @@ class BatchCreateVmFailDeadlockCase extends SubCase{ @Override void environment() { env = env { - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(1) - cpu = 1 - } - - diskOffering { - name = "diskOffering" - diskSize = SizeUnit.GIGABYTE.toByte(1) - } - sftpBackupStorage { name = "sftp" url = "/sftp" @@ -163,10 +152,8 @@ class BatchCreateVmFailDeadlockCase extends SubCase{ } void testBatchCreateVm() { - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") L3NetworkInventory l3 = env.inventoryByName("l3") ImageInventory image = env.inventoryByName("iso") - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") env.afterSimulator(FlatDhcpBackend.BATCH_APPLY_DHCP_PATH) {FlatDhcpBackend.ApplyDhcpRsp rsp, HttpEntity e -> Random r = new Random() @@ -187,12 +174,20 @@ class BatchCreateVmFailDeadlockCase extends SubCase{ def thread = Thread.start { CreateVmInstanceAction action = new CreateVmInstanceAction( name : "test-" + i, - instanceOfferingUuid : instanceOffering.uuid, + cpuNum: 1, + memorySize: gb(1), l3NetworkUuids : [l3.uuid], imageUuid : image.uuid, - 
rootDiskOfferingUuid : diskOffering.uuid, - dataDiskOfferingUuids: [diskOffering.uuid], sessionId: Test.currentEnvSpec.session.uuid, + diskAOs: [ + [ + "boot" : true, + "size" : gb(1), + ], + [ + "size" : gb(1), + ] + ] ) CreateVmInstanceAction.Result ret = action.call() diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/vm/BatchCreateVmFailOnLocalStorageCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/vm/BatchCreateVmFailOnLocalStorageCase.groovy index b6c6712eba9..30aca2f2fb5 100644 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/vm/BatchCreateVmFailOnLocalStorageCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/vm/BatchCreateVmFailOnLocalStorageCase.groovy @@ -30,17 +30,6 @@ class BatchCreateVmFailOnLocalStorageCase extends SubCase{ @Override void environment() { env = env { - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(1) - cpu = 1 - } - - diskOffering { - name = "diskOffering" - diskSize = SizeUnit.GIGABYTE.toByte(1) - } - sftpBackupStorage { name = "sftp" url = "/sftp" @@ -125,10 +114,8 @@ class BatchCreateVmFailOnLocalStorageCase extends SubCase{ PrimaryStorageGlobalConfig.RESERVED_CAPACITY.updateValue(0) PrimaryStorageInventory ps = env.inventoryByName("local") - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") L3NetworkInventory l3 = env.inventoryByName("l3") ImageInventory image = env.inventoryByName("iso") - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") AtomicInteger errorNum = new AtomicInteger(0) @@ -140,11 +127,19 @@ class BatchCreateVmFailOnLocalStorageCase extends SubCase{ def thread = Thread.start { CreateVmInstanceAction action = new CreateVmInstanceAction( name : "test-" + i, - instanceOfferingUuid : instanceOffering.uuid, + cpuNum: 1, + memorySize: gb(1), l3NetworkUuids : [l3.uuid], imageUuid : image.uuid, - dataDiskOfferingUuids: [diskOffering.uuid], - sessionId: 
Test.currentEnvSpec.session.uuid + sessionId: Test.currentEnvSpec.session.uuid, + diskAOs: [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ] + ] ) CreateVmInstanceAction.Result ret = action.call() diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/VmOperationMultyTypeStorageCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/VmOperationMultyTypeStorageCase.groovy index d377c65fd3c..1ef182a4e3c 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/VmOperationMultyTypeStorageCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/VmOperationMultyTypeStorageCase.groovy @@ -3,7 +3,6 @@ package org.zstack.test.integration.storage.primary.local_nfs import org.zstack.header.storage.primary.PrimaryStorageStateEvent import org.zstack.sdk.AttachDataVolumeToVmAction import org.zstack.sdk.CreateVmInstanceAction -import org.zstack.sdk.DiskOfferingInventory import org.zstack.sdk.ImageInventory import org.zstack.sdk.InstanceOfferingInventory import org.zstack.sdk.L3NetworkInventory @@ -48,25 +47,28 @@ class VmOperationMultyTypeStorageCase extends SubCase{ void testCreateVmChooseNfs(){ PrimaryStorageInventory nfs = env.inventoryByName("nfs") as PrimaryStorageInventory - InstanceOfferingInventory ins = env.inventoryByName("instanceOffering") as InstanceOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory CreateVmInstanceAction a = new CreateVmInstanceAction() a.name = "vm" - a.instanceOfferingUuid = ins.uuid + a.cpuNum = 4 + a.memorySize = gb(8) a.imageUuid = image.uuid a.l3NetworkUuids = [l3.uuid] - a.primaryStorageUuidForRootVolume = nfs.uuid a.sessionId = currentEnvSpec.session.uuid + a.diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs.uuid, + ] + ] assert a.call().error == null } void 
testDisableNfsPrimaryStorageThenCreateVmInstance(){ PrimaryStorageInventory nfs = env.inventoryByName("nfs") as PrimaryStorageInventory - InstanceOfferingInventory ins = env.inventoryByName("instanceOffering") as InstanceOfferingInventory - DiskOfferingInventory diskOfferingInventory = env.inventoryByName("diskOffering") ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory @@ -77,11 +79,20 @@ class VmOperationMultyTypeStorageCase extends SubCase{ CreateVmInstanceAction a = new CreateVmInstanceAction() a.name = "vm1" - a.instanceOfferingUuid = ins.uuid + a.cpuNum = 4 + a.memorySize = gb(8) a.imageUuid = image.uuid a.l3NetworkUuids = [l3.uuid] - a.dataDiskOfferingUuids = [diskOfferingInventory.uuid] a.sessionId = currentEnvSpec.session.uuid + a.diskAOs = [ + [ + "boot" : true, + ], + [ + "boot" : false, + "size" : gb(20), + ] + ] CreateVmInstanceAction.Result result = a.call() assert result.error == null @@ -95,7 +106,6 @@ class VmOperationMultyTypeStorageCase extends SubCase{ void testDisableNfsPrimaryStorageThenAttachDataVolumeToVm(){ PrimaryStorageInventory nfs = env.inventoryByName("nfs") as PrimaryStorageInventory InstanceOfferingInventory ins = env.inventoryByName("instanceOffering") as InstanceOfferingInventory - DiskOfferingInventory diskOfferingInventory = env.inventoryByName("diskOffering") ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory @@ -108,12 +118,13 @@ class VmOperationMultyTypeStorageCase extends SubCase{ name = "vm2" imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - instanceOfferingUuid = ins.uuid + cpuNum = 4 + memorySize = gb(8) } VolumeInventory volume = createDataVolume { name = "data" - diskOfferingUuid = diskOfferingInventory.uuid + diskSize = gb(20) } changePrimaryStorageState { From 6c48623106f18d6df61e4b8b92310953c1935db2 Mon Sep 17 00:00:00 2001 From: Zhang 
Wenhao Date: Mon, 26 Jan 2026 18:30:09 +0800 Subject: [PATCH 06/76] [utils]: correct CPU model and MAC retrieval in license check This patch is for zsv_4.10.28; This patch is cherry-picked from ZSTAC-79487, see commit bd5f6d644a7dc92f2d9576d62113608478efeeff. Resolves: ZSV-11252 Change-Id: I646379678a7923767067706e7a6e667a796f686d --- utils/src/main/java/org/zstack/utils/network/NetworkUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/src/main/java/org/zstack/utils/network/NetworkUtils.java b/utils/src/main/java/org/zstack/utils/network/NetworkUtils.java index 73f7b9dab72..e69be9734e0 100755 --- a/utils/src/main/java/org/zstack/utils/network/NetworkUtils.java +++ b/utils/src/main/java/org/zstack/utils/network/NetworkUtils.java @@ -532,7 +532,7 @@ public static List getFreeIpv6InRange(String startIp, String endIp, List } public static List getAllMac() { - ShellResult res = ShellUtils.runAndReturn("ip a | awk '/ether/ {print $2}' | sort -u"); + ShellResult res = ShellUtils.runAndReturn("ip a | awk '/link\\// && !/loopback/ {print $2}' | sort -u"); if (!res.isReturnCode(0)) { throw new RuntimeException("Fail to get mac address"); From 12a8d54dccab441b5414599779f38bf1db26b0ef Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Tue, 27 Jan 2026 15:51:40 +0800 Subject: [PATCH 07/76] [storage]: remove CreateVmInstance.dataDiskOfferingUuids usage update LocalNfsMultiCombineCase and OnePsCreateVmCase This patch is for zsv_4.10.28 Related: ZSV-8585 Change-Id: I64646671626761696b6a7577676a68787871726a --- .../local_nfs/LocalNfsMultiCombineCase.groovy | 1080 ++++++++++------- .../allocator/OnePsCreateVmCase.groovy | 71 +- 2 files changed, 706 insertions(+), 445 deletions(-) diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/LocalNfsMultiCombineCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/LocalNfsMultiCombineCase.groovy index e0443f39bfa..53aa1867bb8 100644 --- 
a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/LocalNfsMultiCombineCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/LocalNfsMultiCombineCase.groovy @@ -1,18 +1,15 @@ package org.zstack.test.integration.storage.primary.local_nfs -import org.zstack.compute.vm.VmSystemTags import org.zstack.core.db.Q import org.zstack.core.db.SQL import org.zstack.header.image.ImageConstant import org.zstack.header.storage.primary.PrimaryStorageCapacityVO import org.zstack.header.storage.primary.PrimaryStorageCapacityVO_ import org.zstack.header.storage.primary.PrimaryStorageStateEvent -import org.zstack.header.storage.primary.PrimaryStorageVO_ import org.zstack.header.volume.VolumeStatus import org.zstack.header.volume.VolumeVO import org.zstack.header.volume.VolumeVO_ import org.zstack.sdk.ClusterInventory -import org.zstack.sdk.CreateVmInstanceAction import org.zstack.sdk.DiskOfferingInventory import org.zstack.sdk.ImageInventory import org.zstack.sdk.InstanceOfferingInventory @@ -35,8 +32,6 @@ class LocalNfsMultiCombineCase extends SubCase { PrimaryStorageInventory local2 PrimaryStorageInventory nfs PrimaryStorageInventory nfs2 - InstanceOfferingInventory instanceOffering - DiskOfferingInventory diskOffering ImageInventory qcow2 ImageInventory iso L3NetworkInventory l3 @@ -55,17 +50,6 @@ class LocalNfsMultiCombineCase extends SubCase { @Override void environment() { env = env { - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(1) - cpu = 1 - } - - diskOffering { - name = "diskOffering" - diskSize = SizeUnit.GIGABYTE.toByte(1) - } - sftpBackupStorage { name = "sftp" url = "/sftp" @@ -157,8 +141,6 @@ class LocalNfsMultiCombineCase extends SubCase { local2 = env.inventoryByName("local2") as PrimaryStorageInventory nfs = env.inventoryByName("nfs") as PrimaryStorageInventory nfs2 = env.inventoryByName("nfs2") as PrimaryStorageInventory - instanceOffering = 
env.inventoryByName("instanceOffering") as InstanceOfferingInventory - diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory qcow2 = env.inventoryByName("image") as ImageInventory iso = env.inventoryByName("iso") as ImageInventory l3 = env.inventoryByName("l3") as L3NetworkInventory @@ -173,111 +155,179 @@ class LocalNfsMultiCombineCase extends SubCase { } void test2Local1NfsQcow2() { - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 101: not assign ps") + createVmInstance { + name = "vm101" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ], + ] } + logger.info("Test 102: assign ps") createVmInstance { - name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + name = "vm102" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] primaryStorageUuidForRootVolume = local.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] } - CreateVmInstanceAction action = new CreateVmInstanceAction() - action.name = "vm1" - action.instanceOfferingUuid = instanceOffering.uuid - action.imageUuid = qcow2.uuid - action.l3NetworkUuids = [l3.uuid] - action.primaryStorageUuidForRootVolume = nfs.uuid - action.dataDiskOfferingUuids = [diskOffering.uuid] - action.sessionId = adminSession() - CreateVmInstanceAction.Result ret = action.call() - assert ret.error == null + logger.info("Test 103:") + createVmInstance { + delegate.name = "vm103" + delegate.cpuNum = 1 + delegate.memorySize = gb(1) + delegate.imageUuid = qcow2.uuid + 
delegate.l3NetworkUuids = [l3.uuid] + delegate.diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + ], + ] + } + logger.info("Test 104:") createVmInstance { - name = "vm2" - instanceOfferingUuid = instanceOffering.uuid + name = "vm104" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] } - CreateVmInstanceAction a = new CreateVmInstanceAction() - a.name = "vm1" - a.instanceOfferingUuid = instanceOffering.uuid - a.imageUuid = qcow2.uuid - a.l3NetworkUuids = [l3.uuid] - a.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - a.dataDiskOfferingUuids = [diskOffering.uuid] - a.sessionId = adminSession() - CreateVmInstanceAction.Result r = a.call() - assert r.error == null - + logger.info("Test 105:") + createVmInstance { + delegate.name = "vm105" + delegate.cpuNum = 1 + delegate.memorySize = gb(1) + delegate.imageUuid = qcow2.uuid + delegate.l3NetworkUuids = [l3.uuid] + delegate.diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } - // assign data nfs , root volume ls ps - vm = createVmInstance { - name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 106: assign data nfs , root volume local ps") + def vm = createVmInstance { + name = "vm106" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): 
nfs.uuid])] - primaryStorageUuidForRootVolume = local.uuid - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } as VmInstanceInventory checkVmDataDiskPs(vm, nfs.uuid) checkVmRootDiskPs(vm, local.uuid) - // assign data ls , root volume ls ps - CreateVmInstanceAction action2 = new CreateVmInstanceAction() - action2.name = "vm1" - action2.instanceOfferingUuid = instanceOffering.uuid - action2.imageUuid = qcow2.uuid - action2.l3NetworkUuids = [l3.uuid] - action2.primaryStorageUuidForRootVolume = local.uuid - action2.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - action2.dataDiskOfferingUuids = [diskOffering.uuid] - action2.sessionId = adminSession() - CreateVmInstanceAction.Result ret2 = action2.call() - checkVmRootDiskPs(ret2.value.inventory, local.uuid) - checkVmDataDiskPs(ret2.value.inventory, local.uuid) - - // assign data nfs , root volume nfs ps - CreateVmInstanceAction action3 = new CreateVmInstanceAction() - action3.name = "vm1" - action3.instanceOfferingUuid = instanceOffering.uuid - action3.imageUuid = qcow2.uuid - action3.l3NetworkUuids = [l3.uuid] - action3.primaryStorageUuidForRootVolume = nfs.uuid - action3.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - action3.dataDiskOfferingUuids = [diskOffering.uuid] - action3.sessionId = adminSession() - CreateVmInstanceAction.Result ret3 = action3.call() - checkVmRootDiskPs(ret3.value.inventory, nfs.uuid) - checkVmDataDiskPs(ret3.value.inventory, nfs.uuid) - - // assign data ls , root volume nfs ps - CreateVmInstanceAction action4 = new CreateVmInstanceAction() - action4.name = "vm1" - action4.instanceOfferingUuid = instanceOffering.uuid - action4.imageUuid = qcow2.uuid - action4.l3NetworkUuids = [l3.uuid] - 
action4.primaryStorageUuidForRootVolume = nfs.uuid - action4.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - action4.dataDiskOfferingUuids = [diskOffering.uuid] - action4.sessionId = adminSession() - CreateVmInstanceAction.Result ret4 = action4.call() - checkVmRootDiskPs(ret4.value.inventory, nfs.uuid) - checkVmDataDiskPs(ret4.value.inventory, local.uuid) + logger.info("Test 107: assign data ls , root volume local ps") + vm = createVmInstance { + delegate.name = "vm107" + delegate.cpuNum = 1 + delegate.memorySize = gb(1) + delegate.imageUuid = qcow2.uuid + delegate.l3NetworkUuids = [l3.uuid] + delegate.diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, local.uuid) + checkVmDataDiskPs(vm, local.uuid) + + logger.info("Test 108: assign data nfs , root volume nfs ps") + vm = createVmInstance { + delegate.name = "vm108" + delegate.cpuNum = 1 + delegate.memorySize = gb(1) + delegate.imageUuid = qcow2.uuid + delegate.l3NetworkUuids = [l3.uuid] + delegate.diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) + checkVmDataDiskPs(vm, nfs.uuid) + + logger.info("Test 109: assign data local, root volume nfs ps") + vm = createVmInstance { + delegate.name = "vm109" + delegate.cpuNum = 1 + delegate.memorySize = gb(1) + delegate.imageUuid = qcow2.uuid + delegate.l3NetworkUuids = [l3.uuid] + delegate.diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) + checkVmDataDiskPs(vm, local.uuid) } void test1Local2NfsQcow2() { @@ -300,13 +350,21 @@ class 
LocalNfsMultiCombineCase extends SubCase { .set(PrimaryStorageCapacityVO_.availableCapacity, 0L) .update() - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 201: not assign ps") + createVmInstance { + name = "vm201" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ], + ] } assert !Q.New(VolumeVO.class).eq(VolumeVO_.status, VolumeStatus.NotInstantiated).isExists() @@ -315,106 +373,162 @@ class LocalNfsMultiCombineCase extends SubCase { .set(PrimaryStorageCapacityVO_.availableCapacity, originCap) .update() - // assign root volume ls ps - vm = createVmInstance { - name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 202: assign root volume local ps") + createVmInstance { + name = "vm202" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = local.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - } - - // assign root volume nfs ps - CreateVmInstanceAction action = new CreateVmInstanceAction() - action.name = "vm1" - action.instanceOfferingUuid = instanceOffering.uuid - action.imageUuid = qcow2.uuid - action.l3NetworkUuids = [l3.uuid] - action.primaryStorageUuidForRootVolume = nfs.uuid - action.dataDiskOfferingUuids = [diskOffering.uuid] - action.sessionId = adminSession() - CreateVmInstanceAction.Result ret = action.call() - assert ret.error == null - - // assign data volume nfs ps - vm = createVmInstance { - name = "vm2" - instanceOfferingUuid = instanceOffering.uuid + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": 
local.uuid, + ], + ] + } + + logger.info("Test 203: assign root volume nfs ps") + createVmInstance { + name = "vm203" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + ], + ] } - // assign data volume ls ps - CreateVmInstanceAction a = new CreateVmInstanceAction() - a.name = "vm1" - a.instanceOfferingUuid = instanceOffering.uuid - a.imageUuid = qcow2.uuid - a.l3NetworkUuids = [l3.uuid] - a.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - a.dataDiskOfferingUuids = [diskOffering.uuid] - a.sessionId = adminSession() - CreateVmInstanceAction.Result r = a.call() - assert ret.error == null - + logger.info("Test 204: assign data volume nfs ps") + createVmInstance { + name = "vm204" + cpuNum = 1 + memorySize = gb(1) + imageUuid = qcow2.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } - // assign data nfs , root volume ls ps - vm = createVmInstance { - name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 205: assign data volume local ps") + createVmInstance { + name = "vm205" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - primaryStorageUuidForRootVolume = local.uuid + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] } + + 
logger.info("Test 206: assign data nfs, root volume local ps") + def vm = createVmInstance { + name = "vm206" + cpuNum = 1 + memorySize = gb(1) + imageUuid = qcow2.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } as VmInstanceInventory checkVmDataDiskPs(vm, nfs.uuid) checkVmRootDiskPs(vm, local.uuid) - // assign data ls , root volume ls ps - CreateVmInstanceAction action2 = new CreateVmInstanceAction() - action2.name = "vm1" - action2.instanceOfferingUuid = instanceOffering.uuid - action2.imageUuid = qcow2.uuid - action2.l3NetworkUuids = [l3.uuid] - action2.primaryStorageUuidForRootVolume = local.uuid - action2.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - action2.dataDiskOfferingUuids = [diskOffering.uuid] - action2.sessionId = adminSession() - CreateVmInstanceAction.Result ret2 = action2.call() - checkVmRootDiskPs(ret2.value.inventory, local.uuid) - checkVmDataDiskPs(ret2.value.inventory, local.uuid) - - // assign data nfs , root volume nfs ps - CreateVmInstanceAction action3 = new CreateVmInstanceAction() - action3.name = "vm1" - action3.instanceOfferingUuid = instanceOffering.uuid - action3.imageUuid = qcow2.uuid - action3.l3NetworkUuids = [l3.uuid] - action3.primaryStorageUuidForRootVolume = nfs.uuid - action3.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - action3.dataDiskOfferingUuids = [diskOffering.uuid] - action3.sessionId = adminSession() - CreateVmInstanceAction.Result ret3 = action3.call() - checkVmRootDiskPs(ret3.value.inventory, nfs.uuid) - checkVmDataDiskPs(ret3.value.inventory, nfs.uuid) - - // assign data ls , root volume nfs ps - CreateVmInstanceAction action4 = new CreateVmInstanceAction() - action4.name = 
"vm1" - action4.instanceOfferingUuid = instanceOffering.uuid - action4.imageUuid = qcow2.uuid - action4.l3NetworkUuids = [l3.uuid] - action4.primaryStorageUuidForRootVolume = nfs.uuid - action4.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - action4.dataDiskOfferingUuids = [diskOffering.uuid] - action4.sessionId = adminSession() - CreateVmInstanceAction.Result ret4 = action4.call() - checkVmRootDiskPs(ret4.value.inventory, nfs.uuid) - checkVmDataDiskPs(ret4.value.inventory, local.uuid) + logger.info("Test 207: assign data local, root volume local ps") + vm = createVmInstance { + name = "vm207" + cpuNum = 1 + memorySize = gb(1) + imageUuid = qcow2.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, local.uuid) + checkVmDataDiskPs(vm, local.uuid) + + logger.info("Test 208: assign data nfs , root volume nfs ps") + vm = createVmInstance { + name = "vm208" + cpuNum = 1 + memorySize = gb(1) + imageUuid = qcow2.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) + checkVmDataDiskPs(vm, nfs.uuid) + + logger.info("Test 209: assign data local , root volume nfs ps") + vm = createVmInstance { + name = "vm209" + cpuNum = 1 + memorySize = gb(1) + imageUuid = qcow2.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) + checkVmDataDiskPs(vm, local.uuid) } void test2Local2NfsQcow2() { @@ -428,13 +542,21 @@ class LocalNfsMultiCombineCase extends 
SubCase { stateEvent = PrimaryStorageStateEvent.disable.toString() } - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 301: not assign ps") + createVmInstance { + name = "vm301" + cpuNum = 1 + memorySize = gb(1) imageUuid = qcow2.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ], + ] } detachPrimaryStorageFromCluster { @@ -468,122 +590,188 @@ class LocalNfsMultiCombineCase extends SubCase { primaryStorageUuid = nfs.uuid clusterUuid = cluster.uuid } - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + + logger.info("Test 401: not assign ps") + createVmInstance { + name = "vm401" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + ], + [ + "size" : gb(1), + ], + ] } - // assign root volume ls ps - vm = createVmInstance { - name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 402: assign root volume local ps") + createVmInstance { + name = "vm402" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = local.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid - } - - // assign root volume nfs ps - CreateVmInstanceAction action = new CreateVmInstanceAction() - action.name = "vm1" - action.instanceOfferingUuid = instanceOffering.uuid - action.imageUuid = iso.uuid - action.l3NetworkUuids = [l3.uuid] - action.primaryStorageUuidForRootVolume = nfs.uuid - action.dataDiskOfferingUuids = [diskOffering.uuid] - action.rootDiskOfferingUuid = diskOffering.uuid - action.sessionId = adminSession() - 
CreateVmInstanceAction.Result ret = action.call() - assert ret.error == null - - // assign data volume nfs ps - vm = createVmInstance { - name = "vm2" - instanceOfferingUuid = instanceOffering.uuid + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + ], + ] + } + + logger.info("Test 403: assign root volume nfs ps") + createVmInstance { + name = "vm403" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - } - - // assign data volume ls ps - CreateVmInstanceAction a = new CreateVmInstanceAction() - a.name = "vm1" - a.instanceOfferingUuid = instanceOffering.uuid - a.imageUuid = iso.uuid - a.l3NetworkUuids = [l3.uuid] - a.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - a.dataDiskOfferingUuids = [diskOffering.uuid] - a.rootDiskOfferingUuid = diskOffering.uuid - a.sessionId = adminSession() - CreateVmInstanceAction.Result r = a.call() - assert r.error == null - - // assign data nfs , root volume ls ps - vm = createVmInstance { - name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + ], + ] + } + + logger.info("Test 404: assign data volume nfs ps") + createVmInstance { + name = "vm404" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - 
primaryStorageUuidForRootVolume = local.uuid + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] } + + logger.info("Test 405: assign data volume local ps") + createVmInstance { + name = "vm405" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } + + logger.info("Test 406: assign data nfs, root volume local ps") + def vm = createVmInstance { + name = "vm406" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } as VmInstanceInventory checkVmDataDiskPs(vm, nfs.uuid) checkVmRootDiskPs(vm, local.uuid) - // assign data ls , root volume ls ps - CreateVmInstanceAction action2 = new CreateVmInstanceAction() - action2.name = "vm1" - action2.instanceOfferingUuid = instanceOffering.uuid - action2.imageUuid = iso.uuid - action2.l3NetworkUuids = [l3.uuid] - action2.primaryStorageUuidForRootVolume = local.uuid - action2.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - action2.dataDiskOfferingUuids = [diskOffering.uuid] - action2.rootDiskOfferingUuid = diskOffering.uuid - action2.sessionId = adminSession() - CreateVmInstanceAction.Result ret2 = action2.call() - checkVmDataDiskPs(ret2.value.inventory, local.uuid) - checkVmRootDiskPs(ret2.value.inventory, local.uuid) - - // assign data nfs , root volume nfs ps - CreateVmInstanceAction action3 = new CreateVmInstanceAction() - action3.name = "vm1" - action3.instanceOfferingUuid = instanceOffering.uuid - action3.imageUuid = iso.uuid - action3.l3NetworkUuids = [l3.uuid] - action3.primaryStorageUuidForRootVolume = 
nfs.uuid - action3.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - action3.dataDiskOfferingUuids = [diskOffering.uuid] - action3.rootDiskOfferingUuid = diskOffering.uuid - action3.sessionId = adminSession() - CreateVmInstanceAction.Result ret3 = action3.call() - checkVmDataDiskPs(ret3.value.inventory, nfs.uuid) - checkVmRootDiskPs(ret3.value.inventory, nfs.uuid) - - // assign data ls , root volume nfs ps - CreateVmInstanceAction action4 = new CreateVmInstanceAction() - action4.name = "vm1" - action4.instanceOfferingUuid = instanceOffering.uuid - action4.imageUuid = iso.uuid - action4.l3NetworkUuids = [l3.uuid] - action4.primaryStorageUuidForRootVolume = nfs.uuid - action4.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - action4.dataDiskOfferingUuids = [diskOffering.uuid] - action4.rootDiskOfferingUuid = diskOffering.uuid - action4.sessionId = adminSession() - CreateVmInstanceAction.Result ret4 = action4.call() - checkVmRootDiskPs(ret4.value.inventory, nfs.uuid) - checkVmDataDiskPs(ret4.value.inventory, local.uuid) + logger.info("Test 407: assign data local, root volume local ps") + vm = createVmInstance { + name = "vm407" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } as VmInstanceInventory + checkVmDataDiskPs(vm, local.uuid) + checkVmRootDiskPs(vm, local.uuid) + + logger.info("Test 408: assign data nfs, root volume nfs ps") + vm = createVmInstance { + name = "vm408" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : 
gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } as VmInstanceInventory + checkVmDataDiskPs(vm, nfs.uuid) + checkVmRootDiskPs(vm, nfs.uuid) + + logger.info("Test 409: assign data local, root volume nfs ps") + vm = createVmInstance { + name = "vm409" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) + checkVmDataDiskPs(vm, local.uuid) } void test1Local2NfsISO() { @@ -597,123 +785,187 @@ class LocalNfsMultiCombineCase extends SubCase { clusterUuid = cluster.uuid } - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 501: not assign ps") + createVmInstance { + name = "vm501" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + ], + [ + "size" : gb(1), + ], + ] } - // assign root volume ls ps - vm = createVmInstance { - name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 502: assign root volume local ps") + createVmInstance { + name = "vm502" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = local.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid - } - - // assign root volume nfs ps - CreateVmInstanceAction action = new CreateVmInstanceAction() - action.name = "vm1" - action.instanceOfferingUuid = instanceOffering.uuid - action.imageUuid = iso.uuid - action.l3NetworkUuids = [l3.uuid] - action.primaryStorageUuidForRootVolume = nfs.uuid - action.dataDiskOfferingUuids = [diskOffering.uuid] - action.rootDiskOfferingUuid 
= diskOffering.uuid - action.sessionId = adminSession() - CreateVmInstanceAction.Result ret = action.call() - assert ret.error == null - - // assign data volume nfs ps - vm = createVmInstance { - name = "vm2" - instanceOfferingUuid = instanceOffering.uuid + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + ], + ] + } + + logger.info("Test 503: assign root volume nfs ps") + createVmInstance { + name = "vm503" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + ], + ] } - // assign data volume ls ps - CreateVmInstanceAction a = new CreateVmInstanceAction() - a.name = "vm1" - a.instanceOfferingUuid = instanceOffering.uuid - a.imageUuid = iso.uuid - a.l3NetworkUuids = [l3.uuid] - a.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - a.dataDiskOfferingUuids = [diskOffering.uuid] - a.rootDiskOfferingUuid = diskOffering.uuid - a.sessionId = adminSession() - CreateVmInstanceAction.Result r = a.call() - assert r.error == null - + logger.info("Test 504: assign data volume nfs ps") + createVmInstance { + name = "vm504" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } - // assign data nfs , root volume ls ps - vm = createVmInstance { - name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 505: assign data volume local ps") + createVmInstance { + 
name = "vm505" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - primaryStorageUuidForRootVolume = local.uuid + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] } + + logger.info("Test 506: assign data nfs, root volume local ps") + def vm = createVmInstance { + name = "vm506" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } as VmInstanceInventory checkVmDataDiskPs(vm, nfs.uuid) checkVmRootDiskPs(vm, local.uuid) - // assign data ls , root volume ls ps - CreateVmInstanceAction action2 = new CreateVmInstanceAction() - action2.name = "vm1" - action2.instanceOfferingUuid = instanceOffering.uuid - action2.imageUuid = iso.uuid - action2.l3NetworkUuids = [l3.uuid] - action2.primaryStorageUuidForRootVolume = local.uuid - action2.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - action2.dataDiskOfferingUuids = [diskOffering.uuid] - action2.rootDiskOfferingUuid = diskOffering.uuid - action2.sessionId = adminSession() - CreateVmInstanceAction.Result ret2 = action2.call() - checkVmDataDiskPs(ret2.value.inventory, local.uuid) - checkVmRootDiskPs(ret2.value.inventory, local.uuid) - - // assign data nfs , root volume nfs ps - CreateVmInstanceAction action3 = new CreateVmInstanceAction() - action3.name = "vm1" - action3.instanceOfferingUuid = instanceOffering.uuid - action3.imageUuid = iso.uuid - action3.l3NetworkUuids = [l3.uuid] - 
action3.primaryStorageUuidForRootVolume = nfs.uuid - action3.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - action3.dataDiskOfferingUuids = [diskOffering.uuid] - action3.rootDiskOfferingUuid = diskOffering.uuid - action3.sessionId = adminSession() - CreateVmInstanceAction.Result ret3 = action3.call() - checkVmDataDiskPs(ret3.value.inventory, nfs.uuid) - checkVmRootDiskPs(ret3.value.inventory, nfs.uuid) - - // assign data ls , root volume nfs ps - CreateVmInstanceAction action4 = new CreateVmInstanceAction() - action4.name = "vm1" - action4.instanceOfferingUuid = instanceOffering.uuid - action4.imageUuid = iso.uuid - action4.l3NetworkUuids = [l3.uuid] - action4.primaryStorageUuidForRootVolume = nfs.uuid - action4.systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - action4.dataDiskOfferingUuids = [diskOffering.uuid] - action4.rootDiskOfferingUuid = diskOffering.uuid - action4.sessionId = adminSession() - CreateVmInstanceAction.Result ret4 = action4.call() - checkVmRootDiskPs(ret4.value.inventory, nfs.uuid) - checkVmDataDiskPs(ret4.value.inventory, local.uuid) + logger.info("Test 507: assign data ls, root volume local ps") + vm = createVmInstance { + name = "vm507" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } as VmInstanceInventory + checkVmDataDiskPs(vm, local.uuid) + checkVmRootDiskPs(vm, local.uuid) + + logger.info("Test 508: assign data nfs, root volume nfs ps") + vm = createVmInstance { + name = "vm508" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + 
"primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": nfs.uuid, + ], + ] + } as VmInstanceInventory + checkVmDataDiskPs(vm, nfs.uuid) + checkVmRootDiskPs(vm, nfs.uuid) + + logger.info("Test 509: assign data local, root volume nfs ps") + vm = createVmInstance { + name = "vm509" + cpuNum = 1 + memorySize = gb(1) + imageUuid = iso.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + "primaryStorageUuid": nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid": local.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) + checkVmDataDiskPs(vm, local.uuid) } void test2Local2NfsISO() { @@ -727,14 +979,22 @@ class LocalNfsMultiCombineCase extends SubCase { stateEvent = PrimaryStorageStateEvent.disable.toString() } - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 601: not assign ps") + createVmInstance { + name = "vm601" + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - rootDiskOfferingUuid = diskOffering.uuid + diskAOs = [ + [ + "boot" : true, + "size": gb(1), + ], + [ + "size" : gb(1), + ], + ] } detachPrimaryStorageFromCluster { @@ -753,9 +1013,9 @@ class LocalNfsMultiCombineCase extends SubCase { } } - void checkVmRootDiskPs(VmInstanceInventory vm, String psUuid){ + static void checkVmRootDiskPs(VmInstanceInventory vm, String psUuid){ assert vm.allVolumes.size() > 0 - for(VolumeInventory disk : vm.allVolumes){ + for(VolumeInventory disk : vm.allVolumes as List){ if(disk.uuid == vm.rootVolumeUuid){ assert psUuid == disk.primaryStorageUuid return @@ -763,9 +1023,9 @@ class LocalNfsMultiCombineCase extends SubCase { } } - void checkVmDataDiskPs(VmInstanceInventory vm, String psUuid){ + static void checkVmDataDiskPs(VmInstanceInventory vm, String psUuid){ assert vm.allVolumes.size() > 1 - for(VolumeInventory 
disk : vm.allVolumes){ + for(VolumeInventory disk : vm.allVolumes as List){ if(disk.uuid != vm.rootVolumeUuid){ assert psUuid == disk.primaryStorageUuid } diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/OnePsCreateVmCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/OnePsCreateVmCase.groovy index 7632d6992fa..38e9a64bb30 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/OnePsCreateVmCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/OnePsCreateVmCase.groovy @@ -20,22 +20,6 @@ class OnePsCreateVmCase extends SubCase { @Override void environment() { env = env { - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(1) - cpu = 1 - } - - diskOffering { - name = "diskOffering" - diskSize = SizeUnit.GIGABYTE.toByte(100) - } - - diskOffering { - name = "diskOffering3" - diskSize = SizeUnit.GIGABYTE.toByte(102) - } - sftpBackupStorage { name = "sftp" url = "/sftp" @@ -111,28 +95,45 @@ class OnePsCreateVmCase extends SubCase { } void createVmVolumeSizeEqualSinglePsCap() { - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") as InstanceOfferingInventory - DiskOfferingInventory _100G = env.inventoryByName("diskOffering") as DiskOfferingInventory - DiskOfferingInventory _102G = env.inventoryByName("diskOffering3") as DiskOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory - CreateVmInstanceAction createVmInstanceAction = new CreateVmInstanceAction( - name: "vm", - instanceOfferingUuid: instanceOffering.uuid, - dataDiskOfferingUuids: [_102G.uuid], - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - sessionId: currentEnvSpec.session.uuid - ) - assert createVmInstanceAction.call().error != null - - 
VmInstanceInventory vm = createVmInstance { - name = "newVm" - instanceOfferingUuid = instanceOffering.uuid - imageUuid = image.uuid - l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [_100G.uuid] + expectApiFailure({ + createVmInstance { + delegate.name = "vm" + delegate.cpuNum = 1 + delegate.memorySize = gb(1) + delegate.imageUuid = image.uuid + delegate.l3NetworkUuids = [l3.uuid] + delegate.diskAOs = [ + [ + "boot" : true, + ], + [ + "boot" : false, + "size" : gb(102) + ] + ] + } + }) { + assert delegate.code == "SYS.1006" + } + + createVmInstance { + delegate.name = "vm" + delegate.cpuNum = 1 + delegate.memorySize = gb(1) + delegate.imageUuid = image.uuid + delegate.l3NetworkUuids = [l3.uuid] + delegate.diskAOs = [ + [ + "boot" : true, + ], + [ + "boot" : false, + "size" : gb(100) + ] + ] } } } From 0eecc2c6bb809eeb386e2700b588308a86b281ed Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Wed, 28 Jan 2026 17:47:28 +0800 Subject: [PATCH 08/76] [volume]: remove CreateVmInstance.dataDiskOfferingUuids usage update cases: * CreateVmAssignNfsPsCase * CreateDataVolumeWithOtherPlatformCase This patch is for zsv_4.10.28 Related: ZSV-8585 Change-Id: I7262796b6b6b73786b786276677870676965796d --- .../local_nfs/CreateVmAssignNfsPsCase.groovy | 439 +++++++++++------- ...eateDataVolumeWithOtherPlatformCase.groovy | 41 +- 2 files changed, 296 insertions(+), 184 deletions(-) diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/CreateVmAssignNfsPsCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/CreateVmAssignNfsPsCase.groovy index 7ed58a0954a..24019635ac4 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/CreateVmAssignNfsPsCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/CreateVmAssignNfsPsCase.groovy @@ -1,6 +1,5 @@ package org.zstack.test.integration.storage.primary.local_nfs -import org.zstack.compute.vm.VmSystemTags 
import org.zstack.sdk.* import org.zstack.test.integration.storage.StorageTest import org.zstack.testlib.EnvSpec @@ -21,12 +20,6 @@ class CreateVmAssignNfsPsCase extends SubCase{ @Override void environment() { env = env { - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(1) - cpu = 1 - } - diskOffering { name = "diskOffering" diskSize = SizeUnit.GIGABYTE.toByte(1) @@ -123,11 +116,8 @@ class CreateVmAssignNfsPsCase extends SubCase{ } void localAndLocal(){ - PrimaryStorageInventory nfs = env.inventoryByName("nfs") as PrimaryStorageInventory - PrimaryStorageInventory nfs2 = env.inventoryByName("nfs2") as PrimaryStorageInventory PrimaryStorageInventory local = env.inventoryByName("local") as PrimaryStorageInventory PrimaryStorageInventory local2 = env.inventoryByName("local2") as PrimaryStorageInventory - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") as InstanceOfferingInventory DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory @@ -148,47 +138,79 @@ class CreateVmAssignNfsPsCase extends SubCase{ assert [dataVolumePrimaryStorages[0].uuid, dataVolumePrimaryStorages[1].uuid].containsAll([local.uuid, local2.uuid]) } - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 101: not assign ps") + createVmInstance { + name = "vm101" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ], + ] } - // assign root volume ps - vm = createVmInstance { - name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 102: assign root volume ps") + def vm = createVmInstance { + name 
= "vm102" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = local.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(1), + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, local.uuid) - // assign data volume ps + logger.info("Test 103: assign data volume ps") vm = createVmInstance { - name = "vm2" - instanceOfferingUuid = instanceOffering.uuid + name = "vm103" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - } + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : local.uuid, + ], + ] + } as VmInstanceInventory checkVmDataDiskPs(vm, local.uuid) - // assign data , root volume ps + logger.info("Test 104: assign data, root volume ps") vm = createVmInstance { - name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + name = "vm104" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local2.uuid])] - primaryStorageUuidForRootVolume = local.uuid - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : local2.uuid, + ], + ] + } as VmInstanceInventory checkVmDataDiskPs(vm, local2.uuid) checkVmRootDiskPs(vm, local.uuid) } @@ -196,10 +218,8 @@ class CreateVmAssignNfsPsCase extends SubCase{ void localAndNfs(){ ClusterInventory cluster = env.inventoryByName("cluster") as ClusterInventory PrimaryStorageInventory nfs = env.inventoryByName("nfs") as 
PrimaryStorageInventory - PrimaryStorageInventory nfs2 = env.inventoryByName("nfs2") as PrimaryStorageInventory PrimaryStorageInventory local = env.inventoryByName("local") as PrimaryStorageInventory PrimaryStorageInventory local2 = env.inventoryByName("local2") as PrimaryStorageInventory - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") as InstanceOfferingInventory DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory @@ -232,124 +252,182 @@ class CreateVmAssignNfsPsCase extends SubCase{ - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 201: not assign ps") + createVmInstance { + name = "vm201" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ], + ] } + + logger.info("Test 202: assign root volume local ps") + def vm = createVmInstance { + name = "vm202" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(1), + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, local.uuid) - checkVmDataDiskPs(vm, nfs.uuid) - // assign root volume local ps + logger.info("Test 203: assign root volume nfs ps") vm = createVmInstance { - name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + name = "vm203" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = local.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs.uuid, + ], + [ + "size" : gb(1), + ], + ] + } 
as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) + + logger.info("Test 204: assign data volume local ps") + vm = createVmInstance { + name = "vm204" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : local.uuid, + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, local.uuid) - checkVmDataDiskPs(vm, nfs.uuid) + checkVmDataDiskPs(vm, local.uuid) - // assign root volume nfs ps - CreateVmInstanceAction a = new CreateVmInstanceAction( - name: "vm2", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - primaryStorageUuidForRootVolume: nfs.uuid, - sessionId: currentEnvSpec.session.uuid - ) - CreateVmInstanceAction.Result result = a.call() - checkVmRootDiskPs(result.value.inventory, nfs.uuid) - - // assign data volume local ps - CreateVmInstanceAction a2 = new CreateVmInstanceAction( - sessionId: currentEnvSpec.session.uuid - ) + logger.info("Test 205: assign data volume nfs ps") + vm = createVmInstance { + name = "vm205" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : nfs.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, local.uuid) + checkVmDataDiskPs(vm, nfs.uuid) + logger.info("Test 206: assign root volume local ps, data volume local ps") vm = createVmInstance { - name = "vm2" - instanceOfferingUuid = instanceOffering.uuid + name = "vm206" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : 
local.uuid, + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, local.uuid) checkVmDataDiskPs(vm, local.uuid) - // assign data volume nfs ps + logger.info("Test 207: assign root volume nfs ps, data volume local ps") vm = createVmInstance { - name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + name = "vm207" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - } - checkVmRootDiskPs(vm, local.uuid) + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : local.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) + checkVmDataDiskPs(vm, local.uuid) + + logger.info("Test 208: assign root volume nfs ps, data volume nfs ps") + vm = createVmInstance { + name = "vm208" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : nfs.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, nfs.uuid) checkVmDataDiskPs(vm, nfs.uuid) - // assign root volume local ps, data volume local ps, - CreateVmInstanceAction a3 = new CreateVmInstanceAction( - name: "vm3", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - dataDiskOfferingUuids: [diskOffering.uuid], - primaryStorageUuidForRootVolume: local.uuid, - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])], - sessionId: currentEnvSpec.session.uuid - ) - result = a3.call() - checkVmRootDiskPs(result.value.inventory, local.uuid) - checkVmDataDiskPs(result.value.inventory, local.uuid) - - // assign 
root volume nfs ps, data volume local ps - CreateVmInstanceAction a4 = new CreateVmInstanceAction( - name: "vm4", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - dataDiskOfferingUuids: [diskOffering.uuid], - primaryStorageUuidForRootVolume: nfs.uuid, - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])], - sessionId: currentEnvSpec.session.uuid - ) - result = a4.call() - checkVmRootDiskPs(result.value.inventory, nfs.uuid) - checkVmDataDiskPs(result.value.inventory, local.uuid) - - // assign root volume nfs ps, data volume nfs ps - CreateVmInstanceAction a5 = new CreateVmInstanceAction( - name: "vm4", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - dataDiskOfferingUuids: [diskOffering.uuid], - primaryStorageUuidForRootVolume: nfs.uuid, - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])], - sessionId: currentEnvSpec.session.uuid - ) - result = a5.call() - checkVmRootDiskPs(result.value.inventory, nfs.uuid) - checkVmDataDiskPs(result.value.inventory, nfs.uuid) - - // assign root volume local ps, data volume nfs ps + logger.info("Test 209: assign root volume local ps, data volume nfs ps") vm = createVmInstance { - name = "vm4" - instanceOfferingUuid = instanceOffering.uuid + name = "vm209" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - primaryStorageUuidForRootVolume = local.uuid - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : nfs.uuid, + ], + ] + } as 
VmInstanceInventory checkVmRootDiskPs(vm, local.uuid) checkVmDataDiskPs(vm, nfs.uuid) } @@ -359,7 +437,6 @@ class CreateVmAssignNfsPsCase extends SubCase{ PrimaryStorageInventory nfs = env.inventoryByName("nfs") as PrimaryStorageInventory PrimaryStorageInventory nfs2 = env.inventoryByName("nfs2") as PrimaryStorageInventory PrimaryStorageInventory local = env.inventoryByName("local") as PrimaryStorageInventory - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") as InstanceOfferingInventory DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory @@ -390,58 +467,98 @@ class CreateVmAssignNfsPsCase extends SubCase{ assert [dataVolumePrimaryStorages[0].uuid, dataVolumePrimaryStorages[1].uuid].containsAll([nfs.uuid, nfs2.uuid]) } - // not assign ps - VmInstanceInventory vm = createVmInstance { + logger.info("Test 301: not assign ps") + createVmInstance { name = "vm" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ], + ] } - // assign root volume nfs ps - vm = createVmInstance { + logger.info("Test 302: assign root volume nfs ps") + def vm = createVmInstance { name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = nfs.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs.uuid, + ], + [ + "size" : gb(1), + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, nfs.uuid) - // assign root volume ps + logger.info("Test 303: assign root volume ps") vm = createVmInstance { name = "vm2" 
- instanceOfferingUuid = instanceOffering.uuid + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = nfs2.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs2.uuid, + ], + [ + "size" : gb(1), + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, nfs2.uuid) - // assign data volume ps + logger.info("Test 304: assign data volume ps") vm = createVmInstance { name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] - } + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : nfs.uuid, + ], + ] + } as VmInstanceInventory checkVmDataDiskPs(vm, nfs.uuid) - // assign data , root volume ps + logger.info("Test 305: assign data, root volume ps") vm = createVmInstance { name = "vm4" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs2.uuid])] - primaryStorageUuidForRootVolume = nfs.uuid - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : nfs2.uuid, + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, nfs.uuid) checkVmDataDiskPs(vm, nfs2.uuid) @@ -455,9 +572,9 @@ class CreateVmAssignNfsPsCase extends SubCase{ } } - void checkVmRootDiskPs(VmInstanceInventory vm, String psUuid){ + static void checkVmRootDiskPs(VmInstanceInventory vm, String psUuid){ assert vm.allVolumes.size() > 0 - 
for(VolumeInventory disk : vm.allVolumes){ + for (def disk : vm.allVolumes as List){ if(disk.uuid == vm.rootVolumeUuid){ assert psUuid == disk.primaryStorageUuid return @@ -465,9 +582,9 @@ class CreateVmAssignNfsPsCase extends SubCase{ } } - void checkVmDataDiskPs(VmInstanceInventory vm, String psUuid){ + static void checkVmDataDiskPs(VmInstanceInventory vm, String psUuid){ assert vm.allVolumes.size() > 1 - for(VolumeInventory disk : vm.allVolumes){ + for (def disk : vm.allVolumes as List){ if(disk.uuid != vm.rootVolumeUuid){ assert psUuid == disk.primaryStorageUuid } diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/volume/CreateDataVolumeWithOtherPlatformCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/volume/CreateDataVolumeWithOtherPlatformCase.groovy index 1df14795178..62b5cfb904e 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/volume/CreateDataVolumeWithOtherPlatformCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/volume/CreateDataVolumeWithOtherPlatformCase.groovy @@ -27,17 +27,6 @@ class CreateDataVolumeWithOtherPlatformCase extends SubCase { @Override void environment() { env = env { - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(8) - cpu = 4 - } - - diskOffering { - name = "diskOffering" - diskSize = SizeUnit.GIGABYTE.toByte(20) - } - cephBackupStorage { name = "ceph-bk" description = "Test" @@ -127,10 +116,8 @@ class CreateDataVolumeWithOtherPlatformCase extends SubCase { } void TestCreateDataVolumeWithOtherPlatform() { - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") ImageInventory image1 = env.inventoryByName("image1") L3NetworkInventory pubL3 = env.inventoryByName("pubL3") - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") PrimaryStorageInventory ps = env.inventoryByName("ceph-pri") KVMAgentCommands.StartVmCmd cmd @@ -139,13 +126,21 @@ class 
CreateDataVolumeWithOtherPlatformCase extends SubCase { return rsp } - VmInstanceInventory vm1 = createVmInstance { + def vm1 = createVmInstance { name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 4 + memorySize = gb(8) imageUuid = image1.uuid l3NetworkUuids = [pubL3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - } + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(20), + ] + ] + } as VmInstanceInventory assert cmd.getRootVolume().useVirtio == false assert cmd.getRootVolume().useVirtioSCSI == false @@ -158,12 +153,12 @@ class CreateDataVolumeWithOtherPlatformCase extends SubCase { stopVmInstance { uuid = vm1.uuid } - VolumeInventory volume1 = createDataVolume { + def volume1 = createDataVolume { name = "volume1" - diskOfferingUuid = diskOffering.uuid + diskSize = gb(20) primaryStorageUuid = ps.uuid systemTags = ["capability::virtio-scsi".toString()] - } + } as VolumeInventory attachDataVolumeToVm { vmInstanceUuid = vm1.uuid volumeUuid = volume1.uuid @@ -182,10 +177,10 @@ class CreateDataVolumeWithOtherPlatformCase extends SubCase { stopVmInstance { uuid = vm1.uuid } - VolumeInventory volume2 = createDataVolume { + def volume2 = createDataVolume { name = "volume2" - diskOfferingUuid = diskOffering.uuid - } + diskSize = gb(20) + } as VolumeInventory attachDataVolumeToVm { vmInstanceUuid = vm1.uuid volumeUuid = volume2.uuid From 71c0f8bc57f8214b6aa6b2d7d4dd3fe63c467df1 Mon Sep 17 00:00:00 2001 From: "Chen, Taiyue" Date: Wed, 28 Jan 2026 13:55:22 +0800 Subject: [PATCH 09/76] [testlib]: support diskUsePrimaryStorage in disk block allow users to initizalize the primary storage in the disk block directly. 
vm { disk { sizeGB(10) diskUsePrimaryStorage("nfs") } } Resolves: ZSV-11263 Change-Id: I6a7567646e6c64636279717576727a6878756476 --- .../integration/image/DeleteIsoCase.groovy | 13 ++++++++++++ .../java/org/zstack/testlib/VmDiskSpec.groovy | 20 +++++++++++++++---- .../java/org/zstack/testlib/VmSpec.groovy | 14 ++++++++----- 3 files changed, 38 insertions(+), 9 deletions(-) diff --git a/test/src/test/groovy/org/zstack/test/integration/image/DeleteIsoCase.groovy b/test/src/test/groovy/org/zstack/test/integration/image/DeleteIsoCase.groovy index 322f9d82ae1..5e1c485ee4f 100644 --- a/test/src/test/groovy/org/zstack/test/integration/image/DeleteIsoCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/image/DeleteIsoCase.groovy @@ -114,8 +114,10 @@ class DeleteIsoCase extends SubCase { useL3Networks("l3") useImage("iso_1") disk { + name = "disk1" boot = true sizeGB(20) + diskUsePrimaryStorage("local") } } } @@ -125,10 +127,21 @@ class DeleteIsoCase extends SubCase { @Override void test() { env.create { + prepare() testDeleteIso() } } + void prepare() { + def vm = queryVmInstance {conditions = ["name=vm"]}[0] as VmInstanceInventory + def localPs = env.inventoryByName("local") as PrimaryStorageInventory + def volumes = vm.allVolumes.findAll { + volume -> volume.name == "disk1" + } + assert volumes.size() == 1 + assert volumes[0].primaryStorageUuid == localPs.uuid + } + void testDeleteIso() { VmGlobalConfig.VM_DEFAULT_CD_ROM_NUM.updateValue(3) diff --git a/testlib/src/main/java/org/zstack/testlib/VmDiskSpec.groovy b/testlib/src/main/java/org/zstack/testlib/VmDiskSpec.groovy index ee07e12ef90..376b9e399bc 100644 --- a/testlib/src/main/java/org/zstack/testlib/VmDiskSpec.groovy +++ b/testlib/src/main/java/org/zstack/testlib/VmDiskSpec.groovy @@ -4,6 +4,8 @@ import org.zstack.header.vm.DiskAO import org.zstack.utils.data.SizeUnit class VmDiskSpec extends Spec { + private Closure primaryStorage = {} + @SpecParam(required = false) boolean boot @SpecParam(required = 
false) @@ -13,8 +15,6 @@ class VmDiskSpec extends Spec { @SpecParam(required = false) String architecture @SpecParam(required = false) - String primaryStorageUuid - @SpecParam(required = false) long size /** * allow: ImageVO.uuid @@ -59,7 +59,7 @@ class VmDiskSpec extends Spec { ao.platform = platform ao.guestOsType = guestOsType ao.architecture = architecture - ao.primaryStorageUuid = primaryStorageUuid + ao.primaryStorageUuid = primaryStorage(); ao.size = size ao.templateUuid = templateUuid ao.diskOfferingUuid = diskOfferingUuid @@ -73,4 +73,16 @@ class VmDiskSpec extends Spec { void sizeGB(long sizeGB) { this.size = SizeUnit.GIGABYTE.toByte(sizeGB) } -} + + @SpecMethod + void diskUsePrimaryStorage(String name) { + preCreate { + addDependency(name, PrimaryStorageSpec.class) + } + primaryStorage = { + PrimaryStorageSpec spec = findSpec(name, PrimaryStorageSpec.class) + assert spec != null: "cannot find primaryStorage[$name], check the vm block of environment" + return spec.inventory.uuid + } + } +} \ No newline at end of file diff --git a/testlib/src/main/java/org/zstack/testlib/VmSpec.groovy b/testlib/src/main/java/org/zstack/testlib/VmSpec.groovy index f8a94a8736d..0acfa28e0f9 100755 --- a/testlib/src/main/java/org/zstack/testlib/VmSpec.groovy +++ b/testlib/src/main/java/org/zstack/testlib/VmSpec.groovy @@ -33,7 +33,7 @@ class VmSpec extends Spec implements HasSession { @SpecParam List dataVolumeSystemTags = [] private List volumeToAttach = [] - private List disks = [] + private List disks = [] VmInstanceInventory inventory @@ -172,7 +172,7 @@ class VmSpec extends Spec implements HasSession { c.resolveStrategy = Closure.DELEGATE_FIRST c() addChild(diskSpec) - disks << diskSpec.toDiskAO() + disks << diskSpec return diskSpec } @@ -199,10 +199,14 @@ class VmSpec extends Spec implements HasSession { delegate.virtio = virtio if (!disks.isEmpty()) { - if (disks.every { (!it.boot) }) { - disks.first().boot = true + List diskAOs = [] + for (VmDiskSpec disk : disks) { + 
diskAOs << disk.toDiskAO() } - delegate.diskAOs = disks + if (diskAOs.every { (!it.boot) }) { + diskAOs.first().boot = true + } + delegate.diskAOs = diskAOs } } From 8d6e83063d007aaa301a155475dc04d9d2543e97 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 29 Jan 2026 17:14:02 +0800 Subject: [PATCH 10/76] [storage]: remove CreateVmInstance.dataDiskOfferingUuids usage update cases: * CreateVmWithDesignatedPSCase * CreateVmWithVolumeSpecifiedPsCase * CreateVmHostAllocateCase This patch is for zsv_4.10.28 Related: ZSV-8585 Change-Id: I68646b70716d7579676d6b6e756b6a637a676777 --- .../vm/CreateVmWithDesignatedPSCase.groovy | 73 +++++++++----- .../CreateVmWithVolumeSpecifiedPsCase.groovy | 98 +++++++++++-------- .../host/CreateVmHostAllocateCase.groovy | 71 ++++++++++---- 3 files changed, 153 insertions(+), 89 deletions(-) diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/vm/CreateVmWithDesignatedPSCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/vm/CreateVmWithDesignatedPSCase.groovy index a624ce79414..152ad9e2539 100644 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/vm/CreateVmWithDesignatedPSCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/vm/CreateVmWithDesignatedPSCase.groovy @@ -1,15 +1,13 @@ package org.zstack.test.integration.kvm.vm -import org.zstack.compute.vm.VmSystemTags -import org.zstack.sdk.DiskOfferingInventory +import org.zstack.core.Platform +import org.zstack.sdk.HostInventory import org.zstack.sdk.ImageInventory -import org.zstack.sdk.InstanceOfferingInventory import org.zstack.sdk.L3NetworkInventory import org.zstack.sdk.PrimaryStorageInventory import org.zstack.test.integration.kvm.KvmTest import org.zstack.testlib.EnvSpec import org.zstack.testlib.SubCase -import org.zstack.utils.data.SizeUnit class CreateVmWithDesignatedPSCase extends SubCase { @@ -23,17 +21,6 @@ class CreateVmWithDesignatedPSCase extends SubCase { @Override void environment() { env = env { - instanceOffering 
{ - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(8) - cpu = 4 - } - - diskOffering { - name = "diskOffering" - diskSize = SizeUnit.GIGABYTE.toByte(20) - } - sftpBackupStorage { name = "sftp" url = "/sftp" @@ -121,23 +108,55 @@ class CreateVmWithDesignatedPSCase extends SubCase { } void TestCreateVmWithDesignatedPS() { - PrimaryStorageInventory ps_1 = env.inventoryByName("local1") - PrimaryStorageInventory ps_2 = env.inventoryByName("local2") - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") - DiskOfferingInventory diskOfferingInventory = env.inventoryByName("diskOffering") - ImageInventory image = env.inventoryByName("image") - L3NetworkInventory l3 = env.inventoryByName("l3") - - expectError { + def ps_1 = env.inventoryByName("local1") as PrimaryStorageInventory + def ps_2 = env.inventoryByName("local2") as PrimaryStorageInventory + def image = env.inventoryByName("image") as ImageInventory + def l3 = env.inventoryByName("l3") as L3NetworkInventory + + expectApiFailure({ createVmInstance { name = "test_vm" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 4 + memorySize = gb(8) l3NetworkUuids = [l3.uuid] imageUuid = image.uuid - primaryStorageUuidForRootVolume = ps_1.uuid - dataDiskOfferingUuids = [diskOfferingInventory.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): ps_2.uuid])] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : ps_1.uuid, + ], + [ + "size" : gb(20), + "primaryStorageUuid" : ps_2.uuid, + ] + ] + } + }) { + // host1 -> ps_1 + // host2 -> ps_2 + assert delegate.code == "HOST_ALLOCATION.1001" + assert delegate.opaque + assert delegate.opaque["rejectedCandidates"] instanceof List + def rejectedCandidates = (delegate.opaque["rejectedCandidates"] as List>) + assert rejectedCandidates.size() == 2 + + def kvm1 = env.inventoryByName("kvm1") as HostInventory + def candidates1 = 
rejectedCandidates.findAll { + return it["hostUuid"] == kvm1.uuid + } + assert candidates1.size() == 1 + assert it["hostName"] == "kvm1" + assert it["reject"] == Platform.i18n("not accessible to the specific primary storage") + assert it["rejectBy"] == "HostPrimaryStorageAllocatorFlow" + + def kvm2 = env.inventoryByName("kvm2") as HostInventory + def candidates2 = rejectedCandidates.findAll { + return it["hostUuid"] == kvm2.uuid } + assert candidates2.size() == 1 + assert it["hostName"] == "kvm2" + assert it["reject"] == Platform.i18n("not accessible to the specific primary storage") + assert it["rejectBy"] == "HostPrimaryStorageAllocatorFlow" } } diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local/multips/CreateVmWithVolumeSpecifiedPsCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local/multips/CreateVmWithVolumeSpecifiedPsCase.groovy index 70a66d8682f..c8349978dfb 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local/multips/CreateVmWithVolumeSpecifiedPsCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local/multips/CreateVmWithVolumeSpecifiedPsCase.groovy @@ -2,10 +2,8 @@ package org.zstack.test.integration.storage.primary.local.multips import org.zstack.core.db.Q import org.zstack.header.image.ImageConstant -import org.zstack.header.volume.Volume import org.zstack.header.volume.VolumeVO import org.zstack.header.volume.VolumeVO_ -import org.zstack.sdk.DiskOfferingInventory import org.zstack.sdk.ImageInventory import org.zstack.sdk.PrimaryStorageInventory import org.zstack.sdk.VmInstanceInventory @@ -30,17 +28,6 @@ class CreateVmWithVolumeSpecifiedPsCase extends SubCase { @Override void environment() { env = env { - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(1) - cpu = 1 - } - - diskOffering { - name = "diskOffering" - diskSize = SizeUnit.GIGABYTE.toByte(20) - } - sftpBackupStorage { name = "sftp" 
url = "/sftp" @@ -126,11 +113,14 @@ class CreateVmWithVolumeSpecifiedPsCase extends SubCase { vm { name = "vm" - useInstanceOffering("instanceOffering") + cpu = 1 + memoryGB(1) useImage("image1") useL3Networks("pubL3") - useRootDiskOffering("diskOffering") useHost("kvm") + disk { + sizeGB(20) + } } } } @@ -147,20 +137,26 @@ class CreateVmWithVolumeSpecifiedPsCase extends SubCase { void testCreateVMPs1WithVolumePs1() { ImageInventory iso = env.inventoryByName("iso") as ImageInventory - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory VmInstanceInventory vm = env.inventoryByName("vm") as VmInstanceInventory PrimaryStorageInventory local_ps1 = env.inventoryByName("local") as PrimaryStorageInventory - PrimaryStorageInventory local_ps2 = env.inventoryByName("local2") as PrimaryStorageInventory VmInstanceInventory newVm = createVmInstance { name = "new_vm" - instanceOfferingUuid = vm.instanceOfferingUuid - rootDiskOfferingUuid = diskOffering.uuid - dataDiskOfferingUuids = [diskOffering.uuid] + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [vm.defaultL3NetworkUuid] - primaryStorageUuidForRootVolume = local_ps1.uuid - systemTags = ["primaryStorageUuidForDataVolume::${local_ps1.uuid}".toString()] + diskAOs = [ + [ + "boot" : true, + "size" : gb(20), + "primaryStorageUuid" : local_ps1.uuid, + ], + [ + "size" : gb(20), + "primaryStorageUuid" : local_ps1.uuid, + ], + ] } as VmInstanceInventory assert newVm.allVolumes.size() == 2 @@ -174,20 +170,27 @@ class CreateVmWithVolumeSpecifiedPsCase extends SubCase { void testCreateVMPs1WithVolumePs2() { ImageInventory iso = env.inventoryByName("iso") as ImageInventory - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory VmInstanceInventory vm = env.inventoryByName("vm") as VmInstanceInventory PrimaryStorageInventory local_ps1 = env.inventoryByName("local") as PrimaryStorageInventory PrimaryStorageInventory local_ps2 = 
env.inventoryByName("local2") as PrimaryStorageInventory VmInstanceInventory newVm = createVmInstance { name = "new_vm" - instanceOfferingUuid = vm.instanceOfferingUuid - rootDiskOfferingUuid = diskOffering.uuid - dataDiskOfferingUuids = [diskOffering.uuid] + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [vm.defaultL3NetworkUuid] - primaryStorageUuidForRootVolume = local_ps1.uuid - systemTags = ["primaryStorageUuidForDataVolume::${local_ps2.uuid}".toString()] + diskAOs = [ + [ + "boot" : true, + "size" : gb(20), + "primaryStorageUuid" : local_ps1.uuid, + ], + [ + "size" : gb(20), + "primaryStorageUuid" : local_ps2.uuid, + ], + ] } as VmInstanceInventory retryInSecs { @@ -200,20 +203,26 @@ class CreateVmWithVolumeSpecifiedPsCase extends SubCase { void testCreateVMPs2WithVolumePs2() { ImageInventory iso = env.inventoryByName("iso") as ImageInventory - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory VmInstanceInventory vm = env.inventoryByName("vm") as VmInstanceInventory - PrimaryStorageInventory local_ps1 = env.inventoryByName("local") as PrimaryStorageInventory PrimaryStorageInventory local_ps2 = env.inventoryByName("local2") as PrimaryStorageInventory VmInstanceInventory newVm = createVmInstance { name = "new_vm" - instanceOfferingUuid = vm.instanceOfferingUuid - rootDiskOfferingUuid = diskOffering.uuid - dataDiskOfferingUuids = [diskOffering.uuid] + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [vm.defaultL3NetworkUuid] - primaryStorageUuidForRootVolume = local_ps2.uuid - systemTags = ["primaryStorageUuidForDataVolume::${local_ps2.uuid}".toString()] + diskAOs = [ + [ + "boot" : true, + "size" : gb(20), + "primaryStorageUuid" : local_ps2.uuid, + ], + [ + "size" : gb(20), + "primaryStorageUuid" : local_ps2.uuid, + ], + ] } as VmInstanceInventory retryInSecs { @@ -226,20 +235,27 @@ class CreateVmWithVolumeSpecifiedPsCase extends SubCase { void 
testCreateVMPs2WithVolumePs1() { ImageInventory iso = env.inventoryByName("iso") as ImageInventory - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory VmInstanceInventory vm = env.inventoryByName("vm") as VmInstanceInventory PrimaryStorageInventory local_ps1 = env.inventoryByName("local") as PrimaryStorageInventory PrimaryStorageInventory local_ps2 = env.inventoryByName("local2") as PrimaryStorageInventory VmInstanceInventory newVm = createVmInstance { name = "new_vm" - instanceOfferingUuid = vm.instanceOfferingUuid - rootDiskOfferingUuid = diskOffering.uuid - dataDiskOfferingUuids = [diskOffering.uuid] + cpuNum = 1 + memorySize = gb(1) imageUuid = iso.uuid l3NetworkUuids = [vm.defaultL3NetworkUuid] - primaryStorageUuidForRootVolume = local_ps2.uuid - systemTags = ["primaryStorageUuidForDataVolume::${local_ps1.uuid}".toString()] + diskAOs = [ + [ + "boot" : true, + "size" : gb(20), + "primaryStorageUuid" : local_ps2.uuid, + ], + [ + "size" : gb(20), + "primaryStorageUuid" : local_ps1.uuid, + ], + ] } as VmInstanceInventory retryInSecs { diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/host/CreateVmHostAllocateCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/host/CreateVmHostAllocateCase.groovy index ba64b555f2d..d357aa33bc0 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/host/CreateVmHostAllocateCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/host/CreateVmHostAllocateCase.groovy @@ -1,7 +1,6 @@ package org.zstack.test.integration.storage.primary.local_nfs.allocator.host import org.zstack.compute.allocator.HostPrimaryStorageAllocatorFlow -import org.zstack.compute.vm.VmSystemTags import org.zstack.core.Platform import org.zstack.sdk.* import org.zstack.test.integration.storage.StorageTest @@ -193,8 +192,6 @@ class 
CreateVmHostAllocateCase extends SubCase { } void testCreateVmAssignNfs(){ - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") as InstanceOfferingInventory - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory HostInventory host = env.inventoryByName("kvm") @@ -203,35 +200,67 @@ class CreateVmHostAllocateCase extends SubCase { createVmInstance { name = "newVm" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] hostUuid = host.uuid - primaryStorageUuidForRootVolume = nfs.uuid + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs.uuid, + ] + ] } createVmInstance { name = "newVm" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] hostUuid = host.uuid - dataDiskOfferingUuids = [diskOffering.uuid,diskOffering.uuid] - primaryStorageUuidForRootVolume = nfs.uuid - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): nfs.uuid])] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : nfs.uuid, + ], + [ + "size" : gb(100), + "primaryStorageUuid" : nfs.uuid, + ], + [ + "size" : gb(100), + "primaryStorageUuid" : nfs.uuid, + ], + ] } - CreateVmInstanceAction createVmInstanceAction = new CreateVmInstanceAction( - name : "newVm", - instanceOfferingUuid : instanceOffering.uuid, - imageUuid : image.uuid, - l3NetworkUuids : [l3.uuid], - hostUuid : host.uuid, - dataDiskOfferingUuids : [diskOffering.uuid,diskOffering.uuid], - primaryStorageUuidForRootVolume : local.uuid, - systemTags : [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): 
local.uuid])], - sessionId : currentEnvSpec.session.uuid - ) - assert null != createVmInstanceAction.call().error + expectApiFailure({ + createVmInstance { + name = "newVm2" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + hostUuid = host.uuid + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(100), + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(100), + "primaryStorageUuid" : local.uuid, + ], + ] + } + }) { + assert delegate.code == "SYS.1006" + } } } From 512ba7261df01128c38f044b8e8c27b041aef1e6 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 29 Jan 2026 17:18:46 +0800 Subject: [PATCH 11/76] [localstorage]: fix error message in LocalStorageMainAllocatorFlow This patch is for zsv_4.10.28 Related: ZSV-10444 Change-Id: I68786e677666656666746868766d646c67616b69 --- .../local/LocalStorageMainAllocatorFlow.java | 15 +++++++-------- .../kvm/vm/CreateVmWithDesignatedPSCase.groovy | 12 ++++++------ 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageMainAllocatorFlow.java b/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageMainAllocatorFlow.java index 1599346804e..7b3f493e5d7 100755 --- a/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageMainAllocatorFlow.java +++ b/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageMainAllocatorFlow.java @@ -43,8 +43,7 @@ public class LocalStorageMainAllocatorFlow extends NoRollbackFlow { private class Result { List result; - String errStr; - List causes = new ArrayList<>(); + ErrorCode errorCode; } @Transactional(readOnly = true) @@ -77,7 +76,7 @@ private Result allocate(Map data) { query.setParameter("hstate", HostState.Enabled); query.setParameter("hstatus", HostStatus.Connected); query.setParameter("ptype", LocalStorageConstants.LOCAL_STORAGE_TYPE); - 
ret.errStr = i18n("the required host[uuid:%s] cannot satisfy conditions[state: %s, status: %s, size > %s bytes]," + + ret.errorCode = operr("the required host[uuid:%s] cannot satisfy conditions[state: %s, status: %s, size > %s bytes]," + " or doesn't belong to a local primary storage satisfying conditions[state: %s, status: %s]," + " or its cluster doesn't attach to any local primary storage", spec.getRequiredHostUuid(), @@ -105,7 +104,7 @@ private Result allocate(Map data) { query.setParameter("hstate", HostState.Enabled); query.setParameter("hstatus", HostStatus.Connected); query.setParameter("ptype", LocalStorageConstants.LOCAL_STORAGE_TYPE); - ret.errStr = i18n("no local primary storage in zone[uuid:%s] can satisfy conditions[state: %s, status: %s]" + + ret.errorCode = operr("no local primary storage in zone[uuid:%s] can satisfy conditions[state: %s, status: %s]" + " or contain hosts satisfying conditions[state: %s, status: %s, size > %s bytes]", spec.getRequiredZoneUuid(), PrimaryStorageState.Enabled, @@ -131,7 +130,7 @@ private Result allocate(Map data) { query.setParameter("hstatus", HostStatus.Connected); query.setParameter("ptype", LocalStorageConstants.LOCAL_STORAGE_TYPE); - ret.errStr = i18n("no local primary storage can satisfy conditions[state: %s, status: %s]" + + ret.errorCode = operr("no local primary storage can satisfy conditions[state: %s, status: %s]" + " or contain hosts satisfying conditions[state: %s, status: %s, size > %s bytes]", PrimaryStorageState.Enabled, PrimaryStorageStatus.Connected, @@ -173,13 +172,13 @@ private Result allocate(Map data) { LocalStorageHostRefVO ref = it.next(); if (!physicalCapacityMgr.checkCapacityByRatio(ref.getPrimaryStorageUuid(), ref.getTotalPhysicalCapacity(), ref.getAvailablePhysicalCapacity()) || !physicalCapacityMgr.checkRequiredCapacityByRatio(ref.getPrimaryStorageUuid(), ref.getTotalPhysicalCapacity(), spec.getTotalSize())) { - ret.causes.add(operr("{the physical capacity usage of the host[uuid:%s] has 
exceeded the threshold[%s]}", + errs.add(operr("{the physical capacity usage of the host[uuid:%s] has exceeded the threshold[%s]}", ref.getHostUuid(), physicalCapacityMgr.getRatio(ref.getPrimaryStorageUuid()))); it.remove(); } } if (candidateHosts.isEmpty()) { - ret.errStr = i18n("failed allocate localstorage"); + ret.errorCode = operr("failed allocate localstorage").withCause(errs); } } Set candidates = new HashSet<>(); @@ -289,7 +288,7 @@ public void run(FlowTrigger trigger, Map data) { return; } - ErrorCode err = ret.causes.isEmpty() ? operr(ret.errStr) : multiErr(ret.causes, ret.errStr); + ErrorCode err = ret.errorCode; trigger.fail(err); } } diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/vm/CreateVmWithDesignatedPSCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/vm/CreateVmWithDesignatedPSCase.groovy index 152ad9e2539..d2ba8230fd0 100644 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/vm/CreateVmWithDesignatedPSCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/vm/CreateVmWithDesignatedPSCase.groovy @@ -145,18 +145,18 @@ class CreateVmWithDesignatedPSCase extends SubCase { return it["hostUuid"] == kvm1.uuid } assert candidates1.size() == 1 - assert it["hostName"] == "kvm1" - assert it["reject"] == Platform.i18n("not accessible to the specific primary storage") - assert it["rejectBy"] == "HostPrimaryStorageAllocatorFlow" + assert candidates1[0]["hostName"] == "kvm1" + assert candidates1[0]["reject"] == Platform.i18n("not accessible to the specific primary storage") + assert candidates1[0]["rejectBy"] == "HostPrimaryStorageAllocatorFlow" def kvm2 = env.inventoryByName("kvm2") as HostInventory def candidates2 = rejectedCandidates.findAll { return it["hostUuid"] == kvm2.uuid } assert candidates2.size() == 1 - assert it["hostName"] == "kvm2" - assert it["reject"] == Platform.i18n("not accessible to the specific primary storage") - assert it["rejectBy"] == "HostPrimaryStorageAllocatorFlow" + assert 
candidates2[0]["hostName"] == "kvm2" + assert candidates2[0]["reject"] == Platform.i18n("not accessible to the specific primary storage") + assert candidates2[0]["rejectBy"] == "HostPrimaryStorageAllocatorFlow" } } From de93f6a57ebddbd3987d920a804939087690299f Mon Sep 17 00:00:00 2001 From: "Chen, Taiyue" Date: Fri, 30 Jan 2026 16:02:43 +0800 Subject: [PATCH 12/76] [ceph]: support systemTag for diskAO.diskOfferingUuid the systemTag for diskAO.diskOfferingUuid is not initialized in the previous version and uses the default value of the systemTag. so, now initialize the systemTag for each diskAO whose diskOfferingUuid is not null. Resolves: ZSV-11278 Change-Id: I616462756a656e646563716579716a6172687573 --- .../primary/CephPrimaryStorageFactory.java | 44 ++++++++++++++++--- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/plugin/ceph/src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageFactory.java b/plugin/ceph/src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageFactory.java index f9cf2cb7050..309129214a4 100755 --- a/plugin/ceph/src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageFactory.java +++ b/plugin/ceph/src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageFactory.java @@ -799,6 +799,7 @@ private Boolean isCephPrimaryStorageVolume(String volumeUuid) { public void preCreateVmInstance(CreateVmInstanceMsg msg) { settingRootVolume(msg); settingDataVolume(msg); + settingDataDisks(msg); } private void settingRootVolume(CreateVmInstanceMsg msg) { @@ -909,15 +910,48 @@ private void settingDataVolume(CreateVmInstanceMsg msg) { if (config.getAllocate().getPrimaryStorage() instanceof CephPrimaryStorageAllocateConfig) { CephPrimaryStorageAllocateConfig primaryStorageAllocateConfig = (CephPrimaryStorageAllocateConfig) config.getAllocate().getPrimaryStorage(); - msg.getDataVolumeSystemTags().add(CephSystemTags.USE_CEPH_PRIMARY_STORAGE_POOL.instantiateTag( - map( - 
e(CephSystemTags.USE_CEPH_PRIMARY_STORAGE_POOL_TOKEN, primaryStorageAllocateConfig.getPoolNames().get(0)) - ) - )); + if (!isEmpty(primaryStorageAllocateConfig.getPoolNames())) { + msg.getDataVolumeSystemTags().add(CephSystemTags.USE_CEPH_PRIMARY_STORAGE_POOL.instantiateTag( + map( + e(CephSystemTags.USE_CEPH_PRIMARY_STORAGE_POOL_TOKEN, primaryStorageAllocateConfig.getPoolNames().get(0)) + ) + )); + } } } } + private void settingDataDisks(CreateVmInstanceMsg msg) { + List diskAOs = msg.getDataDisks(); + if (diskAOs == null) { + return; + } + for (DiskAO diskAO : diskAOs) { + String diskOffering = diskAO.getDiskOfferingUuid(); + if (diskOffering == null || !DiskOfferingSystemTags.DISK_OFFERING_USER_CONFIG.hasTag(diskOffering)) { + continue; + } + DiskOfferingUserConfig config = OfferingUserConfigUtils.getDiskOfferingConfig(diskOffering, DiskOfferingUserConfig.class); + if (config.getAllocate() == null) { + continue; + } + + if (diskAO.getSystemTags() == null) { + diskAO.setSystemTags(new ArrayList<>()); + } + + if (config.getAllocate().getPrimaryStorage() instanceof CephPrimaryStorageAllocateConfig) { + CephPrimaryStorageAllocateConfig primaryStorageAllocateConfig = (CephPrimaryStorageAllocateConfig) config.getAllocate().getPrimaryStorage(); + if (!isEmpty(primaryStorageAllocateConfig.getPoolNames())) { + diskAO.getSystemTags().add(CephSystemTags.USE_CEPH_PRIMARY_STORAGE_POOL.instantiateTag( + map( + e(CephSystemTags.USE_CEPH_PRIMARY_STORAGE_POOL_TOKEN, primaryStorageAllocateConfig.getPoolNames().get(0)) + ) + )); + } + } + } + } @Override public void preCreateVolume(VolumeCreateMessage msg) { String diskOffering = msg.getDiskOfferingUuid(); From f9f23218a745e200f4e039b089c83c2aa25e1d1f Mon Sep 17 00:00:00 2001 From: "Chen, Taiyue" Date: Fri, 30 Jan 2026 16:55:27 +0800 Subject: [PATCH 13/76] [message]: fix incorrect parameter of the format string in ApiMessageParamValidator use the msg class name to replace the ApiMessageParamValidator class name Resolves: ZSV-11283 
Change-Id: I64707261797962716e6676756a6d6f6474626e72 --- .../header/message/ApiMessageParamValidator.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/header/src/main/java/org/zstack/header/message/ApiMessageParamValidator.java b/header/src/main/java/org/zstack/header/message/ApiMessageParamValidator.java index 7fd99391ba0..823e9d81e32 100644 --- a/header/src/main/java/org/zstack/header/message/ApiMessageParamValidator.java +++ b/header/src/main/java/org/zstack/header/message/ApiMessageParamValidator.java @@ -19,7 +19,7 @@ public class ApiMessageParamValidator implements ApiMessageValidator, Ordered { @Override public void validate(APIMessage msg, Field f, Object value, APIParam at) { if (at.required() && value == null) { - throw new InvalidApiMessageException("field[%s] of message[%s] is mandatory, can not be null", f.getName(), getClass().getName()); + throw new InvalidApiMessageException("field[%s] of message[%s] is mandatory, can not be null", f.getName(), msg.getClass().getName()); } if (value != null) { @@ -32,7 +32,7 @@ private void validateNonNullValue(APIMessage msg, Field f, Object value, APIPara String str = (String) value; if (str.length() > at.maxLength()) { throw new InvalidApiMessageException("field[%s] of message[%s] exceeds max length of string. expected was <= %s, actual was %s", - f.getName(), getClass().getName(), at.maxLength(), str.length()); + f.getName(), msg.getClass().getName(), at.maxLength(), str.length()); } } @@ -40,7 +40,7 @@ private void validateNonNullValue(APIMessage msg, Field f, Object value, APIPara String str = (String) value; if (str.length() < at.minLength()) { throw new InvalidApiMessageException("field[%s] of message[%s] less than the min length of string. 
expected was >= %s, actual was %s", - f.getName(), getClass().getName(), at.minLength(), str.length()); + f.getName(), msg.getClass().getName(), at.minLength(), str.length()); } } @@ -48,14 +48,14 @@ private void validateNonNullValue(APIMessage msg, Field f, Object value, APIPara Collection values = (value instanceof Collection) ? (Collection) value : Collections.singletonList(value); for (Object v : values) { - validateValue(at.validValues(), v.toString(), f.getName(), getClass().getName()); + validateValue(at.validValues(), v.toString(), f.getName(), msg.getClass().getName()); } } else if (at.validEnums().length > 0) { Collection values = (value instanceof Collection) ? (Collection) value : Collections.singletonList(value); final String[] validValues = CollectionUtils.valuesForEnums(at.validEnums()).toArray(String[]::new); for (Object v : values) { - validateValue(validValues, v.toString(), f.getName(), getClass().getName()); + validateValue(validValues, v.toString(), f.getName(), msg.getClass().getName()); } } @@ -65,7 +65,7 @@ private void validateNonNullValue(APIMessage msg, Field f, Object value, APIPara Matcher mt = p.matcher(value.toString()); if (!mt.matches()){ throw new InvalidApiMessageException("valid regex value for field[%s] of message[%s] are %s, but %s found", f.getName(), - getClass().getName(), regex, value); + msg.getClass().getName(), regex, value); } } From 1bed14cfb02586b86e91a004d29bcbc89c4aedbe Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Fri, 30 Jan 2026 11:30:51 +0800 Subject: [PATCH 14/76] [storage]: remove CreateVmInstance.dataDiskOfferingUuids usage update cases: * MultiPsStartVmCase * CreateVmAssignPsCase * MultiNfsAttachMultiClusterMultiHostCase This patch is for zsv_5.0.0 Related: ZSV-8585 Change-Id: I7367677175776a6170646d6b6e7164656466656c --- .../allocator/MultiPsStartVmCase.groovy | 54 +-- .../local_smp/CreateVmAssignPsCase.groovy | 366 +++++++++++------- ...iNfsAttachMultiClusterMultiHostCase.groovy | 92 +++-- 3 files 
changed, 292 insertions(+), 220 deletions(-) diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/MultiPsStartVmCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/MultiPsStartVmCase.groovy index f4b4da211f5..d89ae737b85 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/MultiPsStartVmCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_nfs/allocator/MultiPsStartVmCase.groovy @@ -1,6 +1,5 @@ package org.zstack.test.integration.storage.primary.local_nfs.allocator -import org.zstack.compute.vm.VmSystemTags import org.zstack.sdk.* import org.zstack.test.integration.storage.StorageTest import org.zstack.testlib.EnvSpec @@ -21,27 +20,6 @@ class MultiPsStartVmCase extends SubCase { @Override void environment() { env = env { - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(1) - cpu = 1 - } - - diskOffering { - name = "diskOffering" - diskSize = SizeUnit.GIGABYTE.toByte(100) - } - - diskOffering { - name = "diskOffering2" - diskSize = SizeUnit.GIGABYTE.toByte(203) - } - - diskOffering { - name = "diskOffering3" - diskSize = SizeUnit.GIGABYTE.toByte(102) - } - sftpBackupStorage { name = "sftp" url = "/sftp" @@ -130,31 +108,41 @@ class MultiPsStartVmCase extends SubCase { } void createVmVolumeSizeEqualMultiPsCap() { - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") as InstanceOfferingInventory - DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory PrimaryStorageInventory local = env.inventoryByName("local") as PrimaryStorageInventory PrimaryStorageInventory nfs = env.inventoryByName("nfs") as PrimaryStorageInventory - VmInstanceInventory vm = 
createVmInstance { + def vm = createVmInstance { name = "newVm" - instanceOfferingUuid = instanceOffering.uuid + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid,diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])] - primaryStorageUuidForRootVolume = nfs.uuid - } + diskAOs = [ + [ + "boot": true, + "primaryStorageUuid": nfs.uuid + ], + [ + "size": gb(100), + "primaryStorageUuid": local.uuid + ], + [ + "size": gb(100), + "primaryStorageUuid": local.uuid + ] + ] + } as VmInstanceInventory String hostUuid = vm.hostUuid for(int i =0; i < 20; i++){ stopVmInstance { - uuid = vm.uuid + delegate.uuid = vm.uuid } vm = startVmInstance { - uuid = vm.uuid - } + delegate.uuid = vm.uuid + } as VmInstanceInventory assert hostUuid == vm.hostUuid } } diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_smp/CreateVmAssignPsCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_smp/CreateVmAssignPsCase.groovy index b9ed61df289..d3bbf3ab5cc 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_smp/CreateVmAssignPsCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/local_smp/CreateVmAssignPsCase.groovy @@ -1,6 +1,5 @@ package org.zstack.test.integration.storage.primary.local_smp -import org.zstack.compute.vm.VmSystemTags import org.zstack.sdk.* import org.zstack.test.integration.storage.StorageTest import org.zstack.testlib.EnvSpec @@ -21,12 +20,6 @@ class CreateVmAssignPsCase extends SubCase{ @Override void environment() { env = env{ - instanceOffering { - name = "instanceOffering" - memory = SizeUnit.GIGABYTE.toByte(1) - cpu = 1 - } - diskOffering { name = "diskOffering" diskSize = SizeUnit.GIGABYTE.toByte(1) @@ -124,10 +117,8 @@ class CreateVmAssignPsCase extends 
SubCase{ void localAndSmp(){ ClusterInventory cluster = env.inventoryByName("cluster") as ClusterInventory PrimaryStorageInventory smp = env.inventoryByName("smp") as PrimaryStorageInventory - PrimaryStorageInventory smp2 = env.inventoryByName("smp2") as PrimaryStorageInventory PrimaryStorageInventory local = env.inventoryByName("local") as PrimaryStorageInventory PrimaryStorageInventory local2 = env.inventoryByName("local2") as PrimaryStorageInventory - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") as InstanceOfferingInventory DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory @@ -158,116 +149,174 @@ class CreateVmAssignPsCase extends SubCase{ assert [dataVolumePrimaryStorages[0].uuid, dataVolumePrimaryStorages[1].uuid].containsAll([local.uuid, smp.uuid]) } - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 101: not assign ps") + createVmInstance { + name = "vm101" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ], + ] + } + + logger.info("Test 102: assign root volume local ps") + createVmInstance { + name = "vm102" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(1), + ], + ] + } + + logger.info("Test 103: assign root volume smp ps") + def vm = createVmInstance { + name = "vm103" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : smp.uuid, + ], + ] + } as VmInstanceInventory + 
checkVmRootDiskPs(vm, smp.uuid) + + logger.info("Test 104: assign data volume local ps") + createVmInstance { + name = "vm104" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : local.uuid, + ], + ] } + + logger.info("Test 105: assign data volume smp ps") + createVmInstance { + name = "vm105" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : smp.uuid, + ], + ] + } + + logger.info("Test 106: assign root volume local ps, data volume local ps") + vm = createVmInstance { + name = "vm106" + cpuNum = 1 + memorySize = gb(1) + imageUuid = image.uuid + l3NetworkUuids = [l3.uuid] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : local.uuid, + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, local.uuid) - checkVmDataDiskPs(vm, smp.uuid) + checkVmDataDiskPs(vm, local.uuid) - // assign root volume local ps + logger.info("Test 107: assign root volume smp ps, data volume local ps") vm = createVmInstance { - name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + name = "vm107" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = local.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : smp.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : local.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, smp.uuid) + checkVmDataDiskPs(vm, local.uuid) - // assign root volume smp ps - CreateVmInstanceAction a = new CreateVmInstanceAction( - name: "vm2", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - primaryStorageUuidForRootVolume: smp.uuid, - sessionId: 
currentEnvSpec.session.uuid - ) - CreateVmInstanceAction.Result result = a.call() - checkVmRootDiskPs(result.value.inventory, smp.uuid) - - // assign data volume local ps - CreateVmInstanceAction a2 = new CreateVmInstanceAction( - name: "vm2", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - dataDiskOfferingUuids: [diskOffering.uuid], - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])], - sessionId: currentEnvSpec.session.uuid - ) - assert a2.call().error == null - - // assign data volume smp ps + logger.info("Test 108: assign root volume smp ps, data volume smp ps") vm = createVmInstance { - name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + name = "vm108" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): smp.uuid])] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : smp.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : smp.uuid, + ], + ] + } as VmInstanceInventory + checkVmRootDiskPs(vm, smp.uuid) + checkVmDataDiskPs(vm, smp.uuid) - // assign root volume local ps, data volume local ps, - CreateVmInstanceAction a3 = new CreateVmInstanceAction( - name: "vm3", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - dataDiskOfferingUuids: [diskOffering.uuid], - primaryStorageUuidForRootVolume: local.uuid, - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])], - sessionId: currentEnvSpec.session.uuid - ) - result = a3.call() - checkVmRootDiskPs(result.value.inventory, local.uuid) - checkVmDataDiskPs(result.value.inventory, 
local.uuid) - - // assign root volume smp ps, data volume local ps - CreateVmInstanceAction a4 = new CreateVmInstanceAction( - name: "vm4", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - dataDiskOfferingUuids: [diskOffering.uuid], - primaryStorageUuidForRootVolume: smp.uuid, - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): local.uuid])], - sessionId: currentEnvSpec.session.uuid - ) - result = a4.call() - checkVmRootDiskPs(result.value.inventory, smp.uuid) - checkVmDataDiskPs(result.value.inventory, local.uuid) - - // assign root volume smp ps, data volume smp ps - CreateVmInstanceAction a5 = new CreateVmInstanceAction( - name: "vm4", - instanceOfferingUuid: instanceOffering.uuid, - imageUuid: image.uuid, - l3NetworkUuids: [l3.uuid], - dataDiskOfferingUuids: [diskOffering.uuid], - primaryStorageUuidForRootVolume: smp.uuid, - systemTags: [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): smp.uuid])], - sessionId: currentEnvSpec.session.uuid - ) - result = a5.call() - checkVmRootDiskPs(result.value.inventory, smp.uuid) - checkVmDataDiskPs(result.value.inventory, smp.uuid) - - // assign root volume local ps, data volume smp ps + logger.info("Test 109: assign root volume local ps, data volume smp ps") vm = createVmInstance { - name = "vm4" - instanceOfferingUuid = instanceOffering.uuid + name = "vm109" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): smp.uuid])] - primaryStorageUuidForRootVolume = local.uuid - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : local.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : 
smp.uuid, + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, local.uuid) checkVmDataDiskPs(vm, smp.uuid) } @@ -277,7 +326,6 @@ class CreateVmAssignPsCase extends SubCase{ PrimaryStorageInventory smp = env.inventoryByName("smp") as PrimaryStorageInventory PrimaryStorageInventory smp2 = env.inventoryByName("smp2") as PrimaryStorageInventory PrimaryStorageInventory local = env.inventoryByName("local") as PrimaryStorageInventory - InstanceOfferingInventory instanceOffering = env.inventoryByName("instanceOffering") as InstanceOfferingInventory DiskOfferingInventory diskOffering = env.inventoryByName("diskOffering") as DiskOfferingInventory ImageInventory image = env.inventoryByName("image") as ImageInventory L3NetworkInventory l3 = env.inventoryByName("l3") as L3NetworkInventory @@ -308,58 +356,98 @@ class CreateVmAssignPsCase extends SubCase{ assert [dataVolumePrimaryStorages[0].uuid, dataVolumePrimaryStorages[1].uuid].containsAll([smp.uuid, smp2.uuid]) } - // not assign ps - VmInstanceInventory vm = createVmInstance { - name = "vm" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 201: not assign ps") + createVmInstance { + name = "vm201" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + ], + ] } - // assign root volume smp ps - vm = createVmInstance { - name = "vm1" - instanceOfferingUuid = instanceOffering.uuid + logger.info("Test 202: assign root volume smp ps") + def vm = createVmInstance { + name = "vm202" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = smp.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : smp.uuid, + ], + [ + "size" : gb(1), + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, smp.uuid) - // assign root volume ps + logger.info("Test 203: 
assign root volume ps") vm = createVmInstance { - name = "vm2" - instanceOfferingUuid = instanceOffering.uuid + name = "vm203" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = smp2.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : smp2.uuid, + ], + [ + "size" : gb(1), + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, smp2.uuid) - // assign data volume ps + logger.info("Test 204: assign data volume ps") vm = createVmInstance { - name = "vm3" - instanceOfferingUuid = instanceOffering.uuid + name = "vm204" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): smp.uuid])] - } + diskAOs = [ + [ + "boot" : true, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : smp.uuid, + ], + ] + } as VmInstanceInventory checkVmDataDiskPs(vm, smp.uuid) - // assign data , root volume ps + logger.info("Test 205: assign data, root volume ps") vm = createVmInstance { - name = "vm4" - instanceOfferingUuid = instanceOffering.uuid + name = "vm205" + cpuNum = 1 + memorySize = gb(1) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): smp2.uuid])] - primaryStorageUuidForRootVolume = smp.uuid - } + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : smp.uuid, + ], + [ + "size" : gb(1), + "primaryStorageUuid" : smp2.uuid, + ], + ] + } as VmInstanceInventory checkVmRootDiskPs(vm, smp.uuid) checkVmDataDiskPs(vm, smp2.uuid) @@ -373,9 +461,9 @@ class CreateVmAssignPsCase extends SubCase{ } } - void checkVmRootDiskPs(VmInstanceInventory vm, String psUuid){ + 
static void checkVmRootDiskPs(VmInstanceInventory vm, String psUuid){ assert vm.allVolumes.size() > 0 - for(VolumeInventory disk : vm.allVolumes){ + for(VolumeInventory disk : vm.allVolumes as List){ if(disk.uuid == vm.rootVolumeUuid){ assert psUuid == disk.primaryStorageUuid return @@ -383,9 +471,9 @@ class CreateVmAssignPsCase extends SubCase{ } } - void checkVmDataDiskPs(VmInstanceInventory vm, String psUuid){ + static void checkVmDataDiskPs(VmInstanceInventory vm, String psUuid){ assert vm.allVolumes.size() > 1 - for(VolumeInventory disk : vm.allVolumes){ + for(VolumeInventory disk : vm.allVolumes as List){ if(disk.uuid != vm.rootVolumeUuid){ assert psUuid == disk.primaryStorageUuid } diff --git a/test/src/test/groovy/org/zstack/test/integration/storage/primary/nfs/MultiNfsAttachMultiClusterMultiHostCase.groovy b/test/src/test/groovy/org/zstack/test/integration/storage/primary/nfs/MultiNfsAttachMultiClusterMultiHostCase.groovy index 18bc459632e..6b8138d68cc 100644 --- a/test/src/test/groovy/org/zstack/test/integration/storage/primary/nfs/MultiNfsAttachMultiClusterMultiHostCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/storage/primary/nfs/MultiNfsAttachMultiClusterMultiHostCase.groovy @@ -1,24 +1,17 @@ package org.zstack.test.integration.storage.primary.nfs import org.springframework.http.HttpEntity -import org.zstack.compute.vm.VmSystemTags import org.zstack.core.cloudbus.CloudBus import org.zstack.core.db.Q import org.zstack.core.db.SQL import org.zstack.header.Constants import org.zstack.header.errorcode.SysErrors -import org.zstack.header.message.MessageReply -import org.zstack.header.storage.primary.PingPrimaryStorageMsg -import org.zstack.header.storage.primary.PrimaryStorageConstant import org.zstack.header.storage.primary.PrimaryStorageHostRefVO import org.zstack.header.storage.primary.PrimaryStorageHostRefVO_ import org.zstack.header.storage.primary.PrimaryStorageHostStatus -import org.zstack.header.tag.SystemTagVO -import 
org.zstack.header.tag.SystemTagVO_ import org.zstack.header.vm.VmInstanceState import org.zstack.header.vm.VmInstanceVO import org.zstack.header.vm.VmInstanceVO_ -import org.zstack.sdk.CreateVmInstanceAction import org.zstack.sdk.DiskOfferingInventory import org.zstack.sdk.HostInventory import org.zstack.sdk.ImageInventory @@ -28,6 +21,7 @@ import org.zstack.sdk.PrimaryStorageInventory import org.zstack.sdk.ReconnectHostAction import org.zstack.sdk.ReconnectPrimaryStorageAction import org.zstack.sdk.VmInstanceInventory +import org.zstack.sdk.VolumeInventory import org.zstack.storage.primary.nfs.NfsPrimaryStorageKVMBackend import org.zstack.storage.primary.nfs.NfsPrimaryStorageKVMBackendCommands import org.zstack.test.integration.storage.NfsEnv @@ -129,46 +123,30 @@ class MultiNfsAttachMultiClusterMultiHostCase extends SubCase{ } } - void testCreateVmSpecifyHost() { - disconnectHostPS(host1.uuid, ps1.uuid) - disconnectHostPS(host1.uuid, ps2.uuid) - - def a = new CreateVmInstanceAction() - a.name == "vm3" - a.instanceOfferingUuid = ins.uuid - a.imageUuid = image.uuid - a.l3NetworkUuids = [l3.uuid] - a.sessionId = env.session.uuid - a.hostUuid = host1.uuid - - assert a.call().error.code == SysErrors.OPERATION_ERROR - - recoverConnectHostPS() - - disconnectHostPS(host1.uuid, ps2.uuid) - - // try again - def vm3 = a.call().value.inventory - - assert vm3.hostUuid == host1.uuid - assert vm3.allVolumes.size() == 1 - assert vm3.allVolumes[0].primaryStorageUuid == ps1.uuid - } - void testCreateAndStartVmNotOnHostDisconnectNfs(){ disconnectHostPS(host1.uuid, ps1.uuid) disconnectHostPS(host2.uuid, ps2.uuid) - expectError { + expectApiFailure ({ createVmInstance { name = "vm_failed" - instanceOfferingUuid = ins.uuid + cpuNum = 4 + memorySize = gb(8) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = ps1.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = 
[VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): ps2.uuid])] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : ps1.uuid, + ], + [ + "size" : gb(20), + "primaryStorageUuid" : ps2.uuid, + ] + ] } + }) { + assert delegate.code == "HOST_ALLOCATION.1001" } recoverConnectHostPS() @@ -176,17 +154,25 @@ class MultiNfsAttachMultiClusterMultiHostCase extends SubCase{ disconnectHostPS(host2.uuid, ps2.uuid) VmInstanceInventory vm3 = createVmInstance { name = "vm3" - instanceOfferingUuid = ins.uuid + cpuNum = 4 + memorySize = gb(8) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = ps1.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): ps2.uuid])] + diskAOs = [ + [ + "boot" : true, + "primaryStorageUuid" : ps1.uuid, + ], + [ + "size" : gb(20), + "primaryStorageUuid" : ps2.uuid, + ] + ] } as VmInstanceInventory assert vm3.hostUuid == host1.uuid assert vm3.allVolumes.size() == 2 - assert vm3.allVolumes.primaryStorageUuid.containsAll([ps1.uuid, ps2.uuid]) + assert (vm3.allVolumes as List).primaryStorageUuid.containsAll([ps1.uuid, ps2.uuid]) stopVmInstance { uuid = vm3.uuid @@ -224,16 +210,26 @@ class MultiNfsAttachMultiClusterMultiHostCase extends SubCase{ connectingHostPS(host3.uuid, ps1.uuid) connectingHostPS(host3.uuid, ps2.uuid) - expect(AssertionError.class) { + expectApiFailure ({ createVmInstance { name = "vm4" - instanceOfferingUuid = ins.uuid + cpuNum = 4 + memorySize = gb(8) imageUuid = image.uuid l3NetworkUuids = [l3.uuid] - primaryStorageUuidForRootVolume = ps1.uuid - dataDiskOfferingUuids = [diskOffering.uuid] - systemTags = [VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME.instantiateTag([(VmSystemTags.PRIMARY_STORAGE_UUID_FOR_DATA_VOLUME_TOKEN): ps2.uuid])] + diskAOs = [ + [ + "boot" : true, + 
"primaryStorageUuid" : ps1.uuid, + ], + [ + "size" : gb(20), + "primaryStorageUuid" : ps2.uuid, + ] + ] } + }) { + assert delegate.code == "HOST_ALLOCATION.1001" } } From 286395ab9adb59370561e592af96f79d9e0cc8bd Mon Sep 17 00:00:00 2001 From: "Chen, Taiyue" Date: Tue, 24 Feb 2026 15:07:29 +0800 Subject: [PATCH 15/76] [i18n]: translate the error message when reconnecting hosts after modifying host password after modifying the host password, the zstack MN will fail to reconnect the hosts and get the error message that is not well translated. we put the translated error message in the "i18nDetails" field. Resolves: ZSV-8182 Change-Id: I617066656b656c6e74726374777573616b666f6e --- conf/i18n/messages_zh_CN.properties | 2 +- conf/i18n_json/i18n_kvm.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conf/i18n/messages_zh_CN.properties b/conf/i18n/messages_zh_CN.properties index c022aed6460..ac0193789ba 100755 --- a/conf/i18n/messages_zh_CN.properties +++ b/conf/i18n/messages_zh_CN.properties @@ -1914,7 +1914,7 @@ unable\ to\ connect\ to\ kvm\ host[uuid\:%s,\ ip\:%s,\ url\:%s],\ because\ %s = host\ can\ not\ access\ any\ primary\ storage = 主机无法访问任何数据存储 connection\ error\ for\ KVM\ host[uuid\:%s,\ ip\:%s] = 连接主机 {0} [ip:{1}] 失败 the\ host[%s]\ ssh\ port[%s]\ not\ open\ after\ %s\ seconds,\ connect\ timeout = 主机[{0}]SSH端口[{1}]在{2}秒后未打开,连接超时 -host\ password\ has\ been\ changed.\ Please\ update\ host\ password\ in\ management\ node\ by\ UpdateKVMHostAction\ with\ host\ UUID[%s] = +host\ password\ has\ been\ changed.\ Please\ update\ host\ password\ in\ management\ node\ by\ UpdateKVMHostAction\ with\ host\ UUID[%s] = 主机[UUID:{0}]的密码已被修改,请通过 UpdateKVMHostAction 在管理节点更新主机密码 failed\ to\ connect\ host[UUID\=%s]\ with\ SSH\ password = failed\ to\ connect\ host[UUID\=%s]\ with\ private\ key = unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:\ %d,\ ]\ to\ do\ DNS\ check,\ please\ check\ if\ username/password\ is\ wrong;\ %s = 无法连接主机[ip:{0}, 用户名:{1}, 
ssh端口:{2} ]做DNS检查,请检查用户名密码是否正确;{3} diff --git a/conf/i18n_json/i18n_kvm.json b/conf/i18n_json/i18n_kvm.json index 8271000d4fe..ad420548404 100644 --- a/conf/i18n_json/i18n_kvm.json +++ b/conf/i18n_json/i18n_kvm.json @@ -622,7 +622,7 @@ { "raw": "host password has been changed. Please update host password in management node by UpdateKVMHostAction with host UUID[%s]", "en_US": "host password has been changed. Please update host password in management node by UpdateKVMHostAction with host UUID[{0}]", - "zh_CN": "", + "zh_CN": "主机[UUID:{0}]的密码已被修改,请通过 UpdateKVMHostAction 在管理节点更新主机密码", "arguments": [ "self.getUuid()" ], From 98e2c1c482a871eada8322c347245591a694221f Mon Sep 17 00:00:00 2001 From: "Chen, Taiyue" Date: Thu, 26 Feb 2026 11:00:00 +0800 Subject: [PATCH 16/76] [conf]: add i18n "volume[uuid:%s] has been deleted" the volume can be deleted and if users want to recover the vm through the snapshot that was taken after binding the aforesaid volume, the system may throw a null pointer exception because we do not check if the volumes are deleted or not.
Resolves: ZSV-5838 Change-Id: I636c626d7372676776637661766d626466707a72 --- conf/i18n/messages_en_US.properties | 1 + conf/i18n/messages_zh_CN.properties | 5 +++-- conf/i18n_json/i18n_zsv.json | 9 +++++++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/conf/i18n/messages_en_US.properties b/conf/i18n/messages_en_US.properties index 199d8defb37..00832c2e393 100755 --- a/conf/i18n/messages_en_US.properties +++ b/conf/i18n/messages_en_US.properties @@ -3967,6 +3967,7 @@ new\ volume(s)\ %s\ attached\ after\ snapshot\ point,\ can\ only\ revert\ one\ b \ volume[uuid\:\ %s]\ has\ been\ referenced\ by\ other\ volumes\ [%s],\ can\ not\ change\ install\ path\ before\ flatten\ them\ and\ their\ descendants\ = volume[uuid: {0}] has been referenced by other volumes [{1}], can not change install path before flatten them and their descendants current\ volume\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s] = current volume state[{0}] doesn''t allow to proceed message[{1}] failed\ to\ select\ backup\ storage\ to\ download\ iso[uuid\=%s] = failed to select backup storage to download iso[uuid={0}] +volume[uuid\:%s]\ has\ been\ deleted = volume[uuid:{0}] has been deleted unable\ to\ download\ iso\ to\ primary\ storage = unable to download iso to primary storage volume[uuid\:%s,\ type\:%s],\ can't\ create\ snapshot = volume[uuid:{0}, type:{1}], can''t create snapshot volume[uuid\:%s]\ is\ not\ in\ state\ Enabled,\ current\ is\ %s,\ can't\ create\ snapshot = volume[uuid:{0}] is not in state Enabled, current is {1}, can''t create snapshot diff --git a/conf/i18n/messages_zh_CN.properties b/conf/i18n/messages_zh_CN.properties index ac0193789ba..27392033798 100755 --- a/conf/i18n/messages_zh_CN.properties +++ b/conf/i18n/messages_zh_CN.properties @@ -1914,7 +1914,7 @@ unable\ to\ connect\ to\ kvm\ host[uuid\:%s,\ ip\:%s,\ url\:%s],\ because\ %s = host\ can\ not\ access\ any\ primary\ storage = 主机无法访问任何数据存储 connection\ error\ for\ KVM\ host[uuid\:%s,\ ip\:%s] = 连接主机 {0} 
[ip:{1}] 失败 the\ host[%s]\ ssh\ port[%s]\ not\ open\ after\ %s\ seconds,\ connect\ timeout = 主机[{0}]SSH端口[{1}]在{2}秒后未打开,连接超时 -host\ password\ has\ been\ changed.\ Please\ update\ host\ password\ in\ management\ node\ by\ UpdateKVMHostAction\ with\ host\ UUID[%s] = 主机[UUID:{0}]的密码已被修改,请通过 UpdateKVMHostAction 在管理节点更新主机密码 +host\ password\ has\ been\ changed.\ Please\ update\ host\ password\ in\ management\ node\ by\ UpdateKVMHostAction\ with\ host\ UUID[%s] = failed\ to\ connect\ host[UUID\=%s]\ with\ SSH\ password = failed\ to\ connect\ host[UUID\=%s]\ with\ private\ key = unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:\ %d,\ ]\ to\ do\ DNS\ check,\ please\ check\ if\ username/password\ is\ wrong;\ %s = 无法连接主机[ip:{0}, 用户名:{1}, ssh端口:{2} ]做DNS检查,请检查用户名密码是否正确;{3} @@ -3966,7 +3966,8 @@ volume(s)\ %s\ is\ no\ longer\ attached,\ can\ only\ revert\ one\ by\ one.\ If\ new\ volume(s)\ %s\ attached\ after\ snapshot\ point,\ can\ only\ revert\ one\ by\ one.\ If\ you\ need\ to\ group\ revert,\ please\ detach\ it. 
= \ volume[uuid\:\ %s]\ has\ been\ referenced\ by\ other\ volumes\ [%s],\ can\ not\ change\ install\ path\ before\ flatten\ them\ and\ their\ descendants\ = current\ volume\ state[%s]\ doesn't\ allow\ to\ proceed\ message[%s] = -failed\ to\ select\ backup\ storage\ to\ download\ iso[uuid\=%s] = +failed\ to\ select\ backup\ storage\ to\ download\ iso[uuid\=%s] = +volume[uuid\:%s]\ has\ been\ deleted = 硬盘[uuid:{0}]已经被删除了 unable\ to\ download\ iso\ to\ primary\ storage = volume[uuid\:%s,\ type\:%s],\ can't\ create\ snapshot = 卷[uuid:{0},类型:{1}],无法创建快照 volume[uuid\:%s]\ is\ not\ in\ state\ Enabled,\ current\ is\ %s,\ can't\ create\ snapshot = diff --git a/conf/i18n_json/i18n_zsv.json b/conf/i18n_json/i18n_zsv.json index ef72f76fd43..b8bc25c6fd8 100644 --- a/conf/i18n_json/i18n_zsv.json +++ b/conf/i18n_json/i18n_zsv.json @@ -113,5 +113,14 @@ "zh_CN": "", "arguments": [], "fileName": "src/main/java/org/zstack/zsv/storage/ZsvStorageManager.java" + }, + { + "raw": "volume[uuid:%s] has been deleted", + "en_US": "volume[uuid:{0}] has been deleted", + "zh_CN": "硬盘[uuid:{0}]已经被删除了", + "arguments": [ + "volId" + ], + "fileName": "src/main/java/org/zstack/zsv/snapshotgroup/ZsvBeforeRevertSnapshotGroupFlow.java" + } ] \ No newline at end of file From c82be836bdfee9370a912629c2ac1ccfd17eb51c Mon Sep 17 00:00:00 2001 From: "Chen, Taiyue" Date: Tue, 3 Mar 2026 17:27:18 +0800 Subject: [PATCH 17/76] [conf]: translate the error message when reconnecting hosts after modifying host password Problem: when the locale is set as zh_CN, if the password of the host has been changed, the zsv reconnects the host and complains the error in English. Actually, it should report the error using Chinese. Solution: Add the Chinese version of the error to the messages_zh_CN.properties file.
Resolves: ZSV-8182 Change-Id: I756965616171666272646d6465666e6e71706467 --- conf/i18n/messages_zh_CN.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/i18n/messages_zh_CN.properties b/conf/i18n/messages_zh_CN.properties index 27392033798..f955b78cb17 100755 --- a/conf/i18n/messages_zh_CN.properties +++ b/conf/i18n/messages_zh_CN.properties @@ -1914,7 +1914,7 @@ unable\ to\ connect\ to\ kvm\ host[uuid\:%s,\ ip\:%s,\ url\:%s],\ because\ %s = host\ can\ not\ access\ any\ primary\ storage = 主机无法访问任何数据存储 connection\ error\ for\ KVM\ host[uuid\:%s,\ ip\:%s] = 连接主机 {0} [ip:{1}] 失败 the\ host[%s]\ ssh\ port[%s]\ not\ open\ after\ %s\ seconds,\ connect\ timeout = 主机[{0}]SSH端口[{1}]在{2}秒后未打开,连接超时 -host\ password\ has\ been\ changed.\ Please\ update\ host\ password\ in\ management\ node\ by\ UpdateKVMHostAction\ with\ host\ UUID[%s] = +host\ password\ has\ been\ changed.\ Please\ update\ host\ password\ in\ management\ node\ by\ UpdateKVMHostAction\ with\ host\ UUID[%s] = 主机[UUID:{0}]的密码已被修改,请通过 UpdateKVMHostAction 在管理节点更新主机密码 failed\ to\ connect\ host[UUID\=%s]\ with\ SSH\ password = failed\ to\ connect\ host[UUID\=%s]\ with\ private\ key = unable\ to\ connect\ to\ KVM[ip\:%s,\ username\:%s,\ sshPort\:\ %d,\ ]\ to\ do\ DNS\ check,\ please\ check\ if\ username/password\ is\ wrong;\ %s = 无法连接主机[ip:{0}, 用户名:{1}, ssh端口:{2} ]做DNS检查,请检查用户名密码是否正确;{3} From 125d76855ddb49ec3bfcb8a43014ead481da0eaf Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Tue, 10 Mar 2026 21:32:27 +0800 Subject: [PATCH 18/76] [identity]: introduce resource-viewer roles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add resource-viewer roles to support querying all resources; * Change multiple permission checks from “Administrator/Synchronization Call Detection Only” to the broader “Full Resource Read Access / Read-Only API” detection.
Resolves: ZSV-11447 Change-Id: I6d666a686e776b70617474617767676175626d67 --- .../main/java/org/zstack/header/RBACInfo.java | 5 +++++ .../zstack/header/identity/AccountConstant.java | 6 ++++++ .../org/zstack/header/message/APIMessage.java | 11 +++++++++++ .../main/java/org/zstack/identity/Account.java | 16 ++++++++++++++++ .../identity/AccountSubQueryExtension.java | 2 +- .../identity/rbac/RBACAPIRequestChecker.java | 8 ++++++++ .../rbac/RBACResourceRequestChecker.java | 3 +-- 7 files changed, 48 insertions(+), 3 deletions(-) diff --git a/header/src/main/java/org/zstack/header/RBACInfo.java b/header/src/main/java/org/zstack/header/RBACInfo.java index 0a30b416388..7e557ce3e55 100755 --- a/header/src/main/java/org/zstack/header/RBACInfo.java +++ b/header/src/main/java/org/zstack/header/RBACInfo.java @@ -32,6 +32,11 @@ public void roles() { .actions("org.zstack.header.**") .build(); + roleBuilder() + .name("resource-viewer") + .uuid(AccountConstant.ALL_RESOURCES_READABLE_ROLE_UUID) + .build(); + roleBuilder() .name("sod-system-administrator") .uuid(AccountConstant.SOD_SYSTEM_ADMIN_ROLE_UUID) diff --git a/header/src/main/java/org/zstack/header/identity/AccountConstant.java b/header/src/main/java/org/zstack/header/identity/AccountConstant.java index 12eacd78341..a56750f21e6 100755 --- a/header/src/main/java/org/zstack/header/identity/AccountConstant.java +++ b/header/src/main/java/org/zstack/header/identity/AccountConstant.java @@ -41,6 +41,12 @@ public interface AccountConstant { String OTHER_ROLE_UUID = "80315b1f85314917826b182bf6def552"; String LEGACY_ROLE_UUID = "85cfac2138494b2db6501881e1e68045"; + /** + * Allow querying all resources. 
+ * Querying audit data is not allowed + */ + String ALL_RESOURCES_READABLE_ROLE_UUID = "8550153cd5474c79850566787fe0f055"; + // for: Separation of Duties String SOD_SYSTEM_ADMIN_ROLE_UUID = "8550125df53c54edb33d1b8ae83ded55"; String SOD_SECURITY_ADMIN_ROLE_UUID = "855013d87cf55944b4a6c6ae729b3f55"; diff --git a/header/src/main/java/org/zstack/header/message/APIMessage.java b/header/src/main/java/org/zstack/header/message/APIMessage.java index 8ee3efd1b0e..123f091b45e 100755 --- a/header/src/main/java/org/zstack/header/message/APIMessage.java +++ b/header/src/main/java/org/zstack/header/message/APIMessage.java @@ -223,6 +223,17 @@ private static void validateValue(String[] validValues, String value, String fie } } + public static boolean isReadOnlyApi(Class apiClass) { + // TODO: will add RestRequest.readOnly() + // Note: APIGenerateSshKeyPairMsg is not a read-only api, but it is a sync api + if (apiClass.getSimpleName().equals("APIGenerateSshKeyPairMsg") + || apiClass.getSimpleName().equals("APIBatchSyncVolumeSizeMsg") + || apiClass.getSimpleName().equals("APICheckElaborationContentMsg")) { + return false; + } + return APISyncCallMessage.class.isAssignableFrom(apiClass); + } + public String getOperator() { return null; } diff --git a/identity/src/main/java/org/zstack/identity/Account.java b/identity/src/main/java/org/zstack/identity/Account.java index 72fff1a68af..9050bd2df8a 100755 --- a/identity/src/main/java/org/zstack/identity/Account.java +++ b/identity/src/main/java/org/zstack/identity/Account.java @@ -5,6 +5,7 @@ import org.zstack.header.identity.role.RoleAccountRefVO; import org.zstack.header.identity.role.RoleAccountRefVO_; +import static org.zstack.header.identity.AccountConstant.ALL_RESOURCES_READABLE_ROLE_UUID; import static org.zstack.header.identity.AccountConstant.SOD_AUDITOR_ROLE_UUID; import static org.zstack.header.identity.AccountConstant.SOD_SYSTEM_ADMIN_ROLE_UUID; @@ -40,6 +41,21 @@ static boolean isAdmin(String accountUuid) { return 
AccountConstant.isAdmin(accountUuid); } + static boolean isAllResourcesReadable(SessionInventory session) { + return isAllResourcesReadable(session.getAccountUuid()); + } + + static boolean isAllResourcesReadable(String accountUuid) { + if (isAdminPermission(accountUuid)) { + return true; + } + + return Q.New(RoleAccountRefVO.class) + .eq(RoleAccountRefVO_.accountUuid, accountUuid) + .eq(RoleAccountRefVO_.roleUuid, ALL_RESOURCES_READABLE_ROLE_UUID) + .isExists(); + } + static boolean supportToQueryAuditsFromAllAccounts(SessionInventory session) { return supportToQueryAuditsFromAllAccounts(session.getAccountUuid()); } diff --git a/identity/src/main/java/org/zstack/identity/AccountSubQueryExtension.java b/identity/src/main/java/org/zstack/identity/AccountSubQueryExtension.java index cdb3e84bf40..22af5c26d80 100755 --- a/identity/src/main/java/org/zstack/identity/AccountSubQueryExtension.java +++ b/identity/src/main/java/org/zstack/identity/AccountSubQueryExtension.java @@ -13,7 +13,7 @@ public class AccountSubQueryExtension extends AbstractMysqlQuerySubQueryExtensio @Override public String makeSubquery(APIQueryMessage msg, Class inventoryClass) { - if (Account.isAdminPermission(msg.getSession())) { + if (Account.isAllResourcesReadable(msg.getSession())) { return null; } diff --git a/identity/src/main/java/org/zstack/identity/rbac/RBACAPIRequestChecker.java b/identity/src/main/java/org/zstack/identity/rbac/RBACAPIRequestChecker.java index 9b31db34d5c..c82ffcaf9dc 100755 --- a/identity/src/main/java/org/zstack/identity/rbac/RBACAPIRequestChecker.java +++ b/identity/src/main/java/org/zstack/identity/rbac/RBACAPIRequestChecker.java @@ -76,6 +76,14 @@ public void check(APIMessage message) { apiClass.getName())); } + if (APIMessage.isReadOnlyApi(apiClass) && Account.isAllResourcesReadable(session)) { + // exclude audits / event api + if (!"APIGetAuditDataMsg".equals(apiClass.getSimpleName()) + && !"APIGetEventDataMsg".equals(apiClass.getSimpleName())) { + return; + } + } + 
if (!check()) { permissionDenied(); } diff --git a/identity/src/main/java/org/zstack/identity/rbac/RBACResourceRequestChecker.java b/identity/src/main/java/org/zstack/identity/rbac/RBACResourceRequestChecker.java index e83be5fb250..bcd1f0680d8 100644 --- a/identity/src/main/java/org/zstack/identity/rbac/RBACResourceRequestChecker.java +++ b/identity/src/main/java/org/zstack/identity/rbac/RBACResourceRequestChecker.java @@ -10,7 +10,6 @@ import org.zstack.header.message.APIMessage; import org.zstack.header.message.APIParam; import org.zstack.header.message.APIResourceScope; -import org.zstack.header.message.APISyncCallMessage; import org.zstack.header.tag.SystemTagVO; import org.zstack.header.tag.SystemTagVO_; import org.zstack.header.vo.ResourceVO; @@ -107,7 +106,7 @@ private void checkOperationTarget(APIMessage.FieldParam param) { return; } - if (message instanceof APISyncCallMessage) { + if (APIMessage.isReadOnlyApi(message.getClass())) { // no check to read api return; } From b429014412b79413593d39d2df0e0be249eee2ea Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Tue, 13 Jan 2026 14:57:37 +0800 Subject: [PATCH 19/76] [compute]: add opaque on check root disk settings failed Related: ZSV-10444 Change-Id: I7a6864656668797a6c6a62786a7467626b73797a --- .../java/org/zstack/compute/vm/VmInstanceApiInterceptor.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/compute/src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java b/compute/src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java index c17cf5d5179..9d110a31e46 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmInstanceApiInterceptor.java @@ -1012,7 +1012,8 @@ private void validateRootDiskOffering(ImageMediaType imgFormat, APICreateVmInsta } if (msg.getRootDiskSize() <= 0) { - throw new ApiMessageInterceptionException(operr("Unexpected root disk settings")); + throw new 
ApiMessageInterceptionException(operr("Unexpected root disk settings") + .withException("DiskAO[0].size is mandatory when image format is ISO")); } } } From 3dce35413cc860b75e6f060e886703e9b3a7c1bb Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Tue, 13 Jan 2026 17:27:47 +0800 Subject: [PATCH 20/76] [kvm]: add VM edk tags VM edk tag use to save the EDK version the VM used Resolves: ZSV-11010 Change-Id: I797770686172776472746b79767876697062747a --- conf/springConfigXml/Kvm.xml | 3 +- ...sion.java => BootKvmStartVmExtension.java} | 28 ++++++++++++++++++- .../java/org/zstack/kvm/KVMAgentCommands.java | 9 ++++++ .../java/org/zstack/kvm/KVMSystemTags.java | 4 +++ .../test/resources/springConfigXml/Kvm.xml | 3 +- .../org/zstack/testlib/KVMSimulator.groovy | 1 + 6 files changed, 45 insertions(+), 3 deletions(-) rename plugin/kvm/src/main/java/org/zstack/kvm/{BootOrderKvmStartVmExtension.java => BootKvmStartVmExtension.java} (50%) diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index 7536337cff7..3fcef810769 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -165,9 +165,10 @@ - + + diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/BootOrderKvmStartVmExtension.java b/plugin/kvm/src/main/java/org/zstack/kvm/BootKvmStartVmExtension.java similarity index 50% rename from plugin/kvm/src/main/java/org/zstack/kvm/BootOrderKvmStartVmExtension.java rename to plugin/kvm/src/main/java/org/zstack/kvm/BootKvmStartVmExtension.java index d8896ae447b..9da3a907662 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/BootOrderKvmStartVmExtension.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/BootKvmStartVmExtension.java @@ -2,14 +2,21 @@ import org.zstack.compute.vm.VmSystemTags; import org.zstack.header.errorcode.ErrorCode; +import org.zstack.header.vm.VmInstanceInventory; import org.zstack.header.vm.VmInstanceSpec; import org.zstack.header.vm.VmInstanceVO; +import org.zstack.tag.SystemTagCreator; + +import static 
org.zstack.kvm.KVMSystemTags.EDK_RPM_TOKEN; +import static org.zstack.kvm.KVMSystemTags.VM_EDK; +import static org.zstack.utils.CollectionDSL.e; +import static org.zstack.utils.CollectionDSL.map; /** * author:kaicai.hu * Date:2019/12/25 */ -public class BootOrderKvmStartVmExtension implements KVMStartVmExtensionPoint { +public class BootKvmStartVmExtension implements KVMStartVmExtensionPoint, KVMSyncVmDeviceInfoExtensionPoint { @Override public void startVmOnKvmSuccess(KVMHostInventory host, VmInstanceSpec spec) { @@ -32,4 +39,23 @@ public void startVmOnKvmFailed(KVMHostInventory host, VmInstanceSpec spec, Error public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAgentCommands.StartVmCmd cmd) { } + + @Override + public void afterReceiveVmDeviceInfoResponse(VmInstanceInventory vm, KVMAgentCommands.VmDevicesInfoResponse rsp, VmInstanceSpec spec) { + saveVmEdkStatesFromCommand(spec.getVmInventory().getUuid(), rsp); + } + + @SuppressWarnings("unchecked") + private void saveVmEdkStatesFromCommand(String vmUuid, KVMAgentCommands.VmDevicesInfoResponse rsp) { + if (rsp.getEdkRpm() == null) { + VM_EDK.deleteInherentTag(vmUuid); + return; + } + + SystemTagCreator creator = VM_EDK.newSystemTagCreator(vmUuid); + creator.setTagByTokens(map(e(EDK_RPM_TOKEN, rsp.getEdkRpm()))); + creator.inherent = true; + creator.recreate = true; + creator.create(); + } } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index 92e76ede2c5..f69eff87317 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -2703,6 +2703,7 @@ public static class VmDevicesInfoResponse extends AgentResponse { private List virtualDeviceInfoList; private VirtualDeviceInfo memBalloonInfo; private VirtualizerInfoTO virtualizerInfo; + private String edkRpm; @NoLogging private String vmXml; @@ -2738,6 +2739,14 @@ public 
void setVirtualizerInfo(VirtualizerInfoTO virtualizerInfo) { this.virtualizerInfo = virtualizerInfo; } + public String getEdkRpm() { + return edkRpm; + } + + public void setEdkRpm(String edkRpm) { + this.edkRpm = edkRpm; + } + public String getVmXml() { return vmXml; } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMSystemTags.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMSystemTags.java index f630fa79b39..63735015f57 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMSystemTags.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMSystemTags.java @@ -67,4 +67,8 @@ public class KVMSystemTags { public static SystemTag FORCE_DEPLOYMENT_ONCE = new SystemTag("force::deployment::once", HostVO.class); + + public static final String EDK_RPM_TOKEN = "edkRpm"; + public static PatternedSystemTag VM_EDK = + new PatternedSystemTag(String.format("vm::edk::{%s}", EDK_RPM_TOKEN), VmInstanceVO.class); } diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index 70e706ddef2..6bf72dbe39a 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -159,9 +159,10 @@ - + + diff --git a/testlib/src/main/java/org/zstack/testlib/KVMSimulator.groovy b/testlib/src/main/java/org/zstack/testlib/KVMSimulator.groovy index 94fc178245d..f8794f26acd 100755 --- a/testlib/src/main/java/org/zstack/testlib/KVMSimulator.groovy +++ b/testlib/src/main/java/org/zstack/testlib/KVMSimulator.groovy @@ -489,6 +489,7 @@ class KVMSimulator implements Simulator { rsp.virtualizerInfo.uuid = cmd.vmInstanceUuid rsp.virtualizerInfo.virtualizer = "qemu-kvm" rsp.virtualizerInfo.version = "4.2.0-632.g6a6222b.el7" + rsp.edkRpm = "edk2-ovmf-20220126gitbb1bba3d77-3.el8.noarch" return rsp } From bab2bb085bd2bdfaeba9f7814c40798bc5b6c274 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 5 Feb 2026 10:40:16 +0800 Subject: [PATCH 21/76] [conf]: add V5.0.0__schema.sql V5.0.0__schema will move 
to conf/db/zsv, and ref files will move to conf/db/zsv_ref Related: ZSV-11310 Change-Id: I6d756c7a65746f646575726a61686c6d6f6a6a62 --- build/deploydb.sh | 1 + conf/db/zsv/V5.0.0__schema.sql | 22 +++++++++++++++++++ conf/db/{zsv => zsv_ref}/4.1.0 | 0 conf/db/{zsv => zsv_ref}/4.1.6 | 0 conf/db/{zsv => zsv_ref}/4.2.0 | 0 conf/db/{zsv => zsv_ref}/4.2.6 | 0 conf/db/{zsv => zsv_ref}/4.2.8 | 0 conf/db/{zsv => zsv_ref}/4.3.0 | 0 conf/db/{zsv => zsv_ref}/4.3.1 | 0 conf/deploydb.sh | 1 + .../db/schema/CheckNotNullFieldCase.groovy | 8 ++++--- 11 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 conf/db/zsv/V5.0.0__schema.sql rename conf/db/{zsv => zsv_ref}/4.1.0 (100%) rename conf/db/{zsv => zsv_ref}/4.1.6 (100%) rename conf/db/{zsv => zsv_ref}/4.2.0 (100%) rename conf/db/{zsv => zsv_ref}/4.2.6 (100%) rename conf/db/{zsv => zsv_ref}/4.2.8 (100%) rename conf/db/{zsv => zsv_ref}/4.3.0 (100%) rename conf/db/{zsv => zsv_ref}/4.3.1 (100%) diff --git a/build/deploydb.sh b/build/deploydb.sh index 49964c58754..f8446f31450 100755 --- a/build/deploydb.sh +++ b/build/deploydb.sh @@ -42,6 +42,7 @@ mkdir -p ${flyway_sql} eval "rm -f ${flyway_sql}/*" cp ${base}/../conf/db/V0.6__schema.sql ${flyway_sql} cp ${base}/../conf/db/upgrade/* ${flyway_sql} +cp ${base}/../conf/db/zsv/* ${flyway_sql} if [[ ! -n $host ]] || [[ ! 
-n $port ]];then url="jdbc:mysql://localhost:3306/zstack" diff --git a/conf/db/zsv/V5.0.0__schema.sql b/conf/db/zsv/V5.0.0__schema.sql new file mode 100644 index 00000000000..3ee7e729906 --- /dev/null +++ b/conf/db/zsv/V5.0.0__schema.sql @@ -0,0 +1,22 @@ +-- Feature: vTPM & Secure Boot | ZSPHER-1, ZSPHER-14 + +CREATE TABLE IF NOT EXISTS `zstack`.`TpmVO` ( + `uuid` char(32) NOT NULL UNIQUE, + `vmInstanceUuid` char(32) NOT NULL, + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`uuid`), + CONSTRAINT `fkTpmVOVmInstanceVO` FOREIGN KEY (`vmInstanceUuid`) REFERENCES `VmInstanceEO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `zstack`.`TpmHostRefVO` ( + `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT, + `tpmUuid` char(32) NOT NULL, + `hostUuid` char(32) NOT NULL, + `path` varchar(255) NOT NULL, + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`id`), + CONSTRAINT `fkTpmHostRefVOTpmVO` FOREIGN KEY (`tpmUuid`) REFERENCES `TpmVO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE, + CONSTRAINT `fkTpmHostRefVOHostVO` FOREIGN KEY (`hostUuid`) REFERENCES `HostEO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/conf/db/zsv/4.1.0 b/conf/db/zsv_ref/4.1.0 similarity index 100% rename from conf/db/zsv/4.1.0 rename to conf/db/zsv_ref/4.1.0 diff --git a/conf/db/zsv/4.1.6 b/conf/db/zsv_ref/4.1.6 similarity index 100% rename from conf/db/zsv/4.1.6 rename to conf/db/zsv_ref/4.1.6 diff --git a/conf/db/zsv/4.2.0 b/conf/db/zsv_ref/4.2.0 similarity index 100% rename from conf/db/zsv/4.2.0 rename to conf/db/zsv_ref/4.2.0 diff --git a/conf/db/zsv/4.2.6 b/conf/db/zsv_ref/4.2.6 similarity index 100% rename from conf/db/zsv/4.2.6 rename to 
conf/db/zsv_ref/4.2.6 diff --git a/conf/db/zsv/4.2.8 b/conf/db/zsv_ref/4.2.8 similarity index 100% rename from conf/db/zsv/4.2.8 rename to conf/db/zsv_ref/4.2.8 diff --git a/conf/db/zsv/4.3.0 b/conf/db/zsv_ref/4.3.0 similarity index 100% rename from conf/db/zsv/4.3.0 rename to conf/db/zsv_ref/4.3.0 diff --git a/conf/db/zsv/4.3.1 b/conf/db/zsv_ref/4.3.1 similarity index 100% rename from conf/db/zsv/4.3.1 rename to conf/db/zsv_ref/4.3.1 diff --git a/conf/deploydb.sh b/conf/deploydb.sh index a9f8dd90673..0a9bdc2a659 100755 --- a/conf/deploydb.sh +++ b/conf/deploydb.sh @@ -42,6 +42,7 @@ mkdir -p $flyway_sql cp $base/db/V0.6__schema.sql $flyway_sql cp $base/db/upgrade/* $flyway_sql +cp $base/db/zsv/* $flyway_sql url="jdbc:mysql://$host:$port/zstack" diff --git a/test/src/test/groovy/org/zstack/test/integration/db/schema/CheckNotNullFieldCase.groovy b/test/src/test/groovy/org/zstack/test/integration/db/schema/CheckNotNullFieldCase.groovy index ce902e842ab..77d411b2e7f 100644 --- a/test/src/test/groovy/org/zstack/test/integration/db/schema/CheckNotNullFieldCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/db/schema/CheckNotNullFieldCase.groovy @@ -3,8 +3,6 @@ package org.zstack.test.integration.db.schema import org.zstack.core.db.Q import org.zstack.header.vo.ResourceVO import org.zstack.header.vo.ResourceVO_ -import org.zstack.header.identity.AccountResourceRefVO -import org.zstack.header.identity.AccountResourceRefVO_ import org.zstack.testlib.EnvSpec import org.zstack.testlib.SubCase import org.zstack.utils.VersionComparator @@ -31,7 +29,11 @@ class CheckNotNullFieldCase extends SubCase{ @Override void test() { - String upgradeSchemaDir = Paths.get("../conf/db/upgrade").toAbsolutePath().normalize().toString() + checkNotNullField(Paths.get("../conf/db/upgrade").toAbsolutePath().normalize().toString()) + checkNotNullField(Paths.get("../conf/db/zsv").toAbsolutePath().normalize().toString()) + } + + static void checkNotNullField(String upgradeSchemaDir) { 
File dir = new File(upgradeSchemaDir) dir.eachFileRecurse { schema -> if (!schema.name.contains("__")){ From c3b3e296e23e40ce328ee52083f10b4faf406b5c Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Tue, 3 Feb 2026 10:27:48 +0800 Subject: [PATCH 22/76] [header]: add TPM depandency * add TPM related APIs / entities / configs * SysErrors add error of operation not supported Resolves: ZSV-11310 Resolves: ZSPHER-14 Change-Id: I746861687076626e70627a777469696e67786b66 --- .../org/zstack/compute/vm/VmGlobalConfig.java | 5 + .../compute/vm/devices/TpmApiInterceptor.java | 112 ++++++ .../vm/devices/TpmMessageAutoCompleter.java | 76 ++++ conf/errorCodes/sys.xml | 5 + conf/errorCodes/tpm.xml | 24 ++ conf/serviceConfig/tpm.xml | 22 ++ conf/springConfigXml/Kvm.xml | 7 + conf/springConfigXml/VmInstanceManager.xml | 12 + .../zstack/header/errorcode/SysErrors.java | 1 + .../header/identity/rbac/RBACDescription.java | 23 ++ .../java/org/zstack/header/tpm/RBACInfo.java | 36 ++ .../org/zstack/header/tpm/TpmConstants.java | 17 + .../java/org/zstack/header/tpm/TpmErrors.java | 22 ++ .../zstack/header/tpm/api/APIAddTpmEvent.java | 31 ++ .../tpm/api/APIAddTpmEventDoc_zh_cn.groovy | 32 ++ .../zstack/header/tpm/api/APIAddTpmMsg.java | 51 +++ .../tpm/api/APIAddTpmMsgDoc_zh_cn.groovy | 85 ++++ .../tpm/api/APIGetTpmCapabilityMsg.java | 48 +++ .../APIGetTpmCapabilityMsgDoc_zh_cn.groovy | 67 ++++ .../tpm/api/APIGetTpmCapabilityReply.java | 24 ++ .../APIGetTpmCapabilityReplyDoc_zh_cn.groovy | 32 ++ .../zstack/header/tpm/api/APIQueryTpmMsg.java | 26 ++ .../tpm/api/APIQueryTpmMsgDoc_zh_cn.groovy | 31 ++ .../header/tpm/api/APIQueryTpmReply.java | 28 ++ .../tpm/api/APIQueryTpmReplyDoc_zh_cn.groovy | 32 ++ .../header/tpm/api/APIRemoveTpmEvent.java | 19 + .../tpm/api/APIRemoveTpmEventDoc_zh_cn.groovy | 23 ++ .../header/tpm/api/APIRemoveTpmMsg.java | 49 +++ .../tpm/api/APIRemoveTpmMsgDoc_zh_cn.groovy | 76 ++++ .../header/tpm/api/APIUpdateTpmEvent.java | 31 ++ 
.../tpm/api/APIUpdateTpmEventDoc_zh_cn.groovy | 32 ++ .../header/tpm/api/APIUpdateTpmMsg.java | 62 +++ .../tpm/api/APIUpdateTpmMsgDoc_zh_cn.groovy | 76 ++++ .../org/zstack/header/tpm/api/TpmMessage.java | 8 + .../header/tpm/entity/TpmCapabilityView.java | 120 ++++++ .../entity/TpmCapabilityViewDoc_zh_cn.groovy | 66 ++++ .../tpm/entity/TpmHostRefInventory.java | 106 +++++ .../TpmHostRefInventoryDoc_zh_cn.groovy | 45 +++ .../header/tpm/entity/TpmHostRefVO.java | 106 +++++ .../header/tpm/entity/TpmHostRefVO_.java | 15 + .../header/tpm/entity/TpmInventory.java | 110 ++++++ .../tpm/entity/TpmInventoryDoc_zh_cn.groovy | 48 +++ .../org/zstack/header/tpm/entity/TpmSpec.java | 30 ++ .../org/zstack/header/tpm/entity/TpmVO.java | 110 ++++++ .../org/zstack/header/tpm/entity/TpmVO_.java | 14 + .../zstack/header/tpm/message/AddTpmMsg.java | 45 +++ .../header/tpm/message/AddTpmReply.java | 16 + .../header/tpm/message/RemoveTpmMsg.java | 32 ++ .../header/tpm/message/RemoveTpmReply.java | 6 + ...ertTemplatedVmInstanceToVmInstanceMsg.java | 11 + ...dVmInstanceToVmInstanceMsgDoc_zh_cn.groovy | 9 + ...eVmInstanceFromVolumeSnapshotGroupMsg.java | 11 + ...FromVolumeSnapshotGroupMsgDoc_zh_cn.groovy | 9 + .../header/vm/APICreateVmInstanceMsg.java | 33 ++ .../org/zstack/header/vm/VmInstanceVO.java | 2 + .../header/vm/devices/VmDevicesSpec.java | 24 ++ .../java/org/zstack/kvm/KVMGlobalConfig.java | 5 + .../java/org/zstack/kvm/KVMSystemTags.java | 4 + .../org/zstack/kvm/tpm/KvmTpmManager.java | 374 ++++++++++++++++++ sdk/src/main/java/SourceClassMap.java | 6 + .../CreateVmFromVolumeBackupAction.java | 3 + .../org/zstack/sdk/CloneVmInstanceAction.java | 3 + ...TemplatedVmInstanceToVmInstanceAction.java | 3 + .../zstack/sdk/CreateVmInstanceAction.java | 3 + ...InstanceFromTemplatedVmInstanceAction.java | 3 + ...InstanceFromVolumeSnapshotGroupAction.java | 3 + .../org/zstack/sdk/tpm/api/AddTpmAction.java | 110 ++++++ .../org/zstack/sdk/tpm/api/AddTpmResult.java | 14 + 
.../sdk/tpm/api/GetTpmCapabilityAction.java | 98 +++++ .../sdk/tpm/api/GetTpmCapabilityResult.java | 14 + .../zstack/sdk/tpm/api/QueryTpmAction.java | 75 ++++ .../zstack/sdk/tpm/api/QueryTpmResult.java | 22 ++ .../zstack/sdk/tpm/api/RemoveTpmAction.java | 107 +++++ .../zstack/sdk/tpm/api/RemoveTpmResult.java | 7 + .../zstack/sdk/tpm/api/UpdateTpmAction.java | 107 +++++ .../zstack/sdk/tpm/api/UpdateTpmResult.java | 14 + .../sdk/tpm/entity/TpmCapabilityView.java | 79 ++++ .../sdk/tpm/entity/TpmHostRefInventory.java | 55 +++ .../zstack/sdk/tpm/entity/TpmInventory.java | 55 +++ .../test/resources/springConfigXml/Kvm.xml | 7 + .../java/org/zstack/testlib/ApiHelper.groovy | 275 ++++++++++--- .../main/java/org/zstack/utils/StringDSL.java | 6 +- 82 files changed, 3579 insertions(+), 66 deletions(-) create mode 100644 compute/src/main/java/org/zstack/compute/vm/devices/TpmApiInterceptor.java create mode 100644 compute/src/main/java/org/zstack/compute/vm/devices/TpmMessageAutoCompleter.java create mode 100644 conf/errorCodes/tpm.xml create mode 100644 conf/serviceConfig/tpm.xml create mode 100644 header/src/main/java/org/zstack/header/tpm/RBACInfo.java create mode 100644 header/src/main/java/org/zstack/header/tpm/TpmConstants.java create mode 100644 header/src/main/java/org/zstack/header/tpm/TpmErrors.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIAddTpmEvent.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIAddTpmEventDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIAddTpmMsg.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIAddTpmMsgDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityMsg.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityMsgDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityReply.java create mode 100644 
header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityReplyDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmMsg.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmMsgDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmReply.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmReplyDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmEvent.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmEventDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmMsg.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmMsgDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmEvent.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmEventDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmMsg.java create mode 100644 header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmMsgDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/api/TpmMessage.java create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityView.java create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityViewDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventory.java create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventoryDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO.java create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO_.java create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmInventory.java create mode 100644 
header/src/main/java/org/zstack/header/tpm/entity/TpmInventoryDoc_zh_cn.groovy create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmSpec.java create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmVO.java create mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmVO_.java create mode 100644 header/src/main/java/org/zstack/header/tpm/message/AddTpmMsg.java create mode 100644 header/src/main/java/org/zstack/header/tpm/message/AddTpmReply.java create mode 100644 header/src/main/java/org/zstack/header/tpm/message/RemoveTpmMsg.java create mode 100644 header/src/main/java/org/zstack/header/tpm/message/RemoveTpmReply.java create mode 100644 header/src/main/java/org/zstack/header/vm/devices/VmDevicesSpec.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/AddTpmAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/AddTpmResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/GetTpmCapabilityAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/GetTpmCapabilityResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/QueryTpmAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/QueryTpmResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/RemoveTpmAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/RemoveTpmResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/UpdateTpmAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/api/UpdateTpmResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmCapabilityView.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmHostRefInventory.java create mode 100644 sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmInventory.java diff --git a/compute/src/main/java/org/zstack/compute/vm/VmGlobalConfig.java 
b/compute/src/main/java/org/zstack/compute/vm/VmGlobalConfig.java index bd79900c13c..37621cca6a7 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmGlobalConfig.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmGlobalConfig.java @@ -133,4 +133,9 @@ public class VmGlobalConfig { @GlobalConfigValidation(validValues = {"None", "AuthenticAMD"}) @BindResourceConfig(value = {VmInstanceVO.class}) public static GlobalConfig VM_CPUID_VENDOR = new GlobalConfig(CATEGORY, "vm.cpuid.vendor"); + + @GlobalConfigDef(defaultValue = "true", type = Boolean.class, description = "whether reset TPM state after VM clone") + @GlobalConfigValidation(validValues = {"true", "false"}) + @BindResourceConfig(value = {VmInstanceVO.class, ClusterVO.class}) + public static GlobalConfig RESET_TPM_AFTER_VM_CLONE = new GlobalConfig(CATEGORY, "reset.tpm.after.vm.clone"); } diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/TpmApiInterceptor.java b/compute/src/main/java/org/zstack/compute/vm/devices/TpmApiInterceptor.java new file mode 100644 index 00000000000..78724e72a57 --- /dev/null +++ b/compute/src/main/java/org/zstack/compute/vm/devices/TpmApiInterceptor.java @@ -0,0 +1,112 @@ +package org.zstack.compute.vm.devices; + +import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.core.Platform; +import org.zstack.core.cloudbus.CloudBus; +import org.zstack.core.db.Q; +import org.zstack.header.apimediator.ApiMessageInterceptionException; +import org.zstack.header.apimediator.ApiMessageInterceptor; +import org.zstack.header.apimediator.StopRoutingException; +import org.zstack.header.errorcode.SysErrors; +import org.zstack.header.message.APIMessage; +import org.zstack.header.tpm.api.*; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.tpm.entity.TpmVO_; +import org.zstack.header.vm.VmInstanceVO; +import org.zstack.header.vm.VmInstanceVO_; +import org.zstack.utils.Utils; +import org.zstack.utils.logging.CLogger; + +import static 
org.zstack.core.Platform.err; +import static org.zstack.header.tpm.TpmConstants.*; +import static org.zstack.header.tpm.TpmErrors.*; +import static org.zstack.header.vm.VmInstanceConstant.KVM_HYPERVISOR_TYPE; + +public class TpmApiInterceptor implements ApiMessageInterceptor { + private static final CLogger logger = Utils.getLogger(TpmApiInterceptor.class); + + @Autowired + private CloudBus bus; + + @Override + public APIMessage intercept(APIMessage msg) throws ApiMessageInterceptionException { + if (msg instanceof APIGetTpmCapabilityMsg) { + validate((APIGetTpmCapabilityMsg) msg); + } else if (msg instanceof APIAddTpmMsg) { + validate((APIAddTpmMsg) msg); + } else if (msg instanceof APIRemoveTpmMsg) { + validate((APIRemoveTpmMsg) msg); + } else if (msg instanceof APIUpdateTpmMsg) { + validate((APIUpdateTpmMsg) msg); + } + + return msg; + } + + private void validate(APIGetTpmCapabilityMsg msg) { + makeSureVmInstanceIsKvmType(msg.getVmInstanceUuid()); + } + + private void validate(APIAddTpmMsg msg) { + makeSureVmInstanceIsKvmType(msg.getVmInstanceUuid()); + + boolean tpmExists = Q.New(TpmVO.class) + .eq(TpmVO_.vmInstanceUuid, msg.getVmInstanceUuid()) + .isExists(); + if (tpmExists) { + throw new ApiMessageInterceptionException(err(TPM_ALREADY_EXISTS, "tpm device already exists")); + } + + boolean vmInSupportState = Q.New(VmInstanceVO.class) + .eq(VmInstanceVO_.uuid, msg.getVmInstanceUuid()) + .in(VmInstanceVO_.state, SUPPORT_VM_STATES_FOR_TPM_OPERATION) + .isExists(); + if (!vmInSupportState) { + throw new ApiMessageInterceptionException(err(VM_STATE_ERROR, + "The current VM state does not support adding TPM operations") + .withOpaque("support.vm.state", SUPPORT_VM_STATES_FOR_TPM_OPERATION)); + } + + if (msg.getResourceUuid() == null) { + msg.setResourceUuid(Platform.getUuid()); + } + } + + private void validate(APIRemoveTpmMsg msg) { + makeSureVmInstanceIsKvmType(msg.getVmInstanceUuid()); + + boolean tpmExists = Q.New(TpmVO.class) + .eq(TpmVO_.vmInstanceUuid, 
msg.getVmInstanceUuid()) + .isExists(); + if (!tpmExists) { + APIRemoveTpmEvent evt = new APIRemoveTpmEvent(msg.getId()); + bus.publish(evt); + throw new StopRoutingException(); + } + + boolean vmInSupportState = Q.New(VmInstanceVO.class) + .eq(VmInstanceVO_.uuid, msg.getVmInstanceUuid()) + .in(VmInstanceVO_.state, SUPPORT_VM_STATES_FOR_TPM_OPERATION) + .isExists(); + if (!vmInSupportState) { + throw new ApiMessageInterceptionException(err(VM_STATE_ERROR, + "The current VM state does not support removing TPM operations") + .withOpaque("support.vm.state", SUPPORT_VM_STATES_FOR_TPM_OPERATION)); + } + } + + private void validate(APIUpdateTpmMsg msg) { + makeSureVmInstanceIsKvmType(msg.getVmInstanceUuid()); + bus.makeTargetServiceIdByResourceUuid(msg, SERVICE_ID, msg.getTpmUuid()); + } + + private void makeSureVmInstanceIsKvmType(String vmInstanceUuid) { + String hypervisorType = Q.New(VmInstanceVO.class) + .select(VmInstanceVO_.hypervisorType) + .eq(VmInstanceVO_.uuid, vmInstanceUuid) + .findValue(); + if (!KVM_HYPERVISOR_TYPE.equals(hypervisorType)) { + throw new ApiMessageInterceptionException(err(SysErrors.NOT_SUPPORTED, "only allowed for kvm type VM instance")); + } + } +} diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/TpmMessageAutoCompleter.java b/compute/src/main/java/org/zstack/compute/vm/devices/TpmMessageAutoCompleter.java new file mode 100644 index 00000000000..a7024551cc2 --- /dev/null +++ b/compute/src/main/java/org/zstack/compute/vm/devices/TpmMessageAutoCompleter.java @@ -0,0 +1,76 @@ +package org.zstack.compute.vm.devices; + +import org.zstack.core.db.Q; +import org.zstack.header.apimediator.ApiMessageInterceptionException; +import org.zstack.header.apimediator.GlobalApiMessageInterceptor; +import org.zstack.header.message.APIDeleteMessage; +import org.zstack.header.message.APIMessage; +import org.zstack.header.tpm.api.APIGetTpmCapabilityMsg; +import org.zstack.header.tpm.api.APIRemoveTpmMsg; +import 
org.zstack.header.tpm.api.APIUpdateTpmMsg; +import org.zstack.header.tpm.api.TpmMessage; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.tpm.entity.TpmVO_; + +import java.util.List; + +import static org.zstack.core.Platform.argerr; +import static org.zstack.core.Platform.err; +import static org.zstack.header.tpm.TpmErrors.TPM_NOT_FOUND; +import static org.zstack.utils.CollectionDSL.list; + +public class TpmMessageAutoCompleter implements GlobalApiMessageInterceptor { + @Override + public APIMessage intercept(APIMessage msg) throws ApiMessageInterceptionException { + if (msg instanceof TpmMessage) { + validateAndComplete((TpmMessage) msg); + } + return msg; + } + + @Override + @SuppressWarnings("rawtypes") + public List getMessageClassToIntercept() { + return list(APIGetTpmCapabilityMsg.class, APIRemoveTpmMsg.class, APIUpdateTpmMsg.class); + } + + @Override + public InterceptorPosition getPosition() { + return InterceptorPosition.FRONT; + } + + private void validateAndComplete(TpmMessage msg) throws ApiMessageInterceptionException { + String tpmUuid = msg.getTpmUuid(); + String vmUuid = msg.getVmInstanceUuid(); + + if (tpmUuid == null && vmUuid == null) { + throw new ApiMessageInterceptionException(argerr("tpmUuid and vmInstanceUuid cannot be null at the same time")); + } + + if (tpmUuid != null && vmUuid != null) { + boolean exists = Q.New(TpmVO.class) + .eq(TpmVO_.uuid, tpmUuid) + .eq(TpmVO_.vmInstanceUuid, vmUuid) + .isExists(); + if (!exists) { + throw new ApiMessageInterceptionException(argerr("tpmUuid[%s] and vmInstanceUuid[%s] are not consistent", tpmUuid, vmUuid)); + } + } else if (vmUuid != null) { + tpmUuid = Q.New(TpmVO.class) + .select(TpmVO_.uuid) + .eq(TpmVO_.vmInstanceUuid, vmUuid) + .findValue(); + if (tpmUuid == null && (!(msg instanceof APIDeleteMessage))) { + throw new ApiMessageInterceptionException(err(TPM_NOT_FOUND, "tpm for vm[%s] does not exist", vmUuid)); + } else { + msg.setTpmUuid(tpmUuid); + } + } else { + vmUuid = 
Q.New(TpmVO.class) + .select(TpmVO_.vmInstanceUuid) + .eq(TpmVO_.uuid, tpmUuid) + .findValue(); + msg.setVmInstanceUuid(vmUuid); + } + } +} diff --git a/conf/errorCodes/sys.xml b/conf/errorCodes/sys.xml index c5b763f86ba..b484cbe5c1d 100755 --- a/conf/errorCodes/sys.xml +++ b/conf/errorCodes/sys.xml @@ -105,5 +105,10 @@ 1090 Multiple reasons + + + 1091 + Operation not supported + diff --git a/conf/errorCodes/tpm.xml b/conf/errorCodes/tpm.xml new file mode 100644 index 00000000000..e93b159fa71 --- /dev/null +++ b/conf/errorCodes/tpm.xml @@ -0,0 +1,24 @@ + + TPM + + + 1000 + General error + + + + 1701 + TPM already exists in this VM + + + + 1702 + TPM not found + + + + 1703 + The current VM state does not support this TPM operations + + + diff --git a/conf/serviceConfig/tpm.xml b/conf/serviceConfig/tpm.xml new file mode 100644 index 00000000000..dab0467a8f6 --- /dev/null +++ b/conf/serviceConfig/tpm.xml @@ -0,0 +1,22 @@ + + + tpm + TpmApiInterceptor + + + org.zstack.header.tpm.api.APIAddTpmMsg + + + org.zstack.header.tpm.api.APIGetTpmCapabilityMsg + + + org.zstack.header.tpm.api.APIQueryTpmMsg + query + + + org.zstack.header.tpm.api.APIRemoveTpmMsg + + + org.zstack.header.tpm.api.APIUpdateTpmMsg + + diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index 3fcef810769..7a7160db29a 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -244,4 +244,11 @@ + + + + + + + diff --git a/conf/springConfigXml/VmInstanceManager.xml b/conf/springConfigXml/VmInstanceManager.xml index 20e094378aa..8d3f7cc41e6 100755 --- a/conf/springConfigXml/VmInstanceManager.xml +++ b/conf/springConfigXml/VmInstanceManager.xml @@ -267,4 +267,16 @@ + + + + + + + + + + + + diff --git a/header/src/main/java/org/zstack/header/errorcode/SysErrors.java b/header/src/main/java/org/zstack/header/errorcode/SysErrors.java index c59fba9c5b5..79e27fa69ac 100755 --- a/header/src/main/java/org/zstack/header/errorcode/SysErrors.java +++ 
b/header/src/main/java/org/zstack/header/errorcode/SysErrors.java @@ -26,6 +26,7 @@ public enum SysErrors { // ZSphere only MULTIPLE_REASONS(1090), + NOT_SUPPORTED(1091), ; private String code; diff --git a/header/src/main/java/org/zstack/header/identity/rbac/RBACDescription.java b/header/src/main/java/org/zstack/header/identity/rbac/RBACDescription.java index 02d7b038c92..5b379e19fd7 100755 --- a/header/src/main/java/org/zstack/header/identity/rbac/RBACDescription.java +++ b/header/src/main/java/org/zstack/header/identity/rbac/RBACDescription.java @@ -37,6 +37,29 @@ default RBAC.AttributeSupportResourceBuilder attributeSupportResourceBuilder() { return new RBAC.AttributeSupportResourceBuilder(); } + /** + * If you want to contribute a resource to a resource ensemble, you can use this method: + * + * Ex: (Make TpmVO as a child resource of VmInstanceVO) + *
+     * resourceEnsembleContributorBuilder()
+     *     .resource(TpmVO.class)
+     *     .contributeTo(VmInstanceVO.class)
+     *     .build();
+     * 
+ * + * You must set @EntityGraph.Neighbour on VmInstanceVO.class + *
+     * \@EntityGraph(
+     *         friends = {
+     *                 \@EntityGraph.Neighbour(type = TpmVO.class, myField = "uuid", targetField = "vmInstanceUuid"),
+     *         }
+     * )
+     * 
+ * + * or use {@link org.zstack.header.identity.rbac.RBAC.ResourceEnsembleContributorBuilder#resourceWithCustomizeFindingMethods(java.lang.Class, java.util.function.Consumer, java.util.function.Consumer)} + * to specify how to find the resource by SQL. + */ default RBAC.ResourceEnsembleContributorBuilder resourceEnsembleContributorBuilder() { return new RBAC.ResourceEnsembleContributorBuilder(); } diff --git a/header/src/main/java/org/zstack/header/tpm/RBACInfo.java b/header/src/main/java/org/zstack/header/tpm/RBACInfo.java new file mode 100644 index 00000000000..b8b56608d86 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/RBACInfo.java @@ -0,0 +1,36 @@ +package org.zstack.header.tpm; + +import org.zstack.header.identity.rbac.RBACDescription; +import org.zstack.header.rest.SDKPackage; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.vm.VmInstanceVO; + +@SDKPackage(packageName="org.zstack.sdk.tpm") +public class RBACInfo implements RBACDescription { + @Override + public String permissionName() { + return "tpm"; + } + + @Override + public void permissions() { + permissionBuilder() + .communityAvailable() + .zsvBasicAvailable() + .zsvProAvailable() + .build(); + + resourceEnsembleContributorBuilder() + .resource(TpmVO.class) + .contributeTo(VmInstanceVO.class) + .build(); + } + + @Override + public void roles() { + roleContributorBuilder() + .actionsInThisPermission() + .toOtherRole() + .build(); + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/TpmConstants.java b/header/src/main/java/org/zstack/header/tpm/TpmConstants.java new file mode 100644 index 00000000000..4654c5c956d --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/TpmConstants.java @@ -0,0 +1,17 @@ +package org.zstack.header.tpm; + +import org.zstack.header.vm.VmInstanceState; + +import java.util.Collections; +import java.util.List; + +import static org.zstack.utils.CollectionDSL.list; + +public class TpmConstants { + private TpmConstants() {} + 
+ public static final String SERVICE_ID = "tpm"; + + public static final List SUPPORT_VM_STATES_FOR_TPM_OPERATION = + Collections.unmodifiableList(list(VmInstanceState.Stopped)); +} diff --git a/header/src/main/java/org/zstack/header/tpm/TpmErrors.java b/header/src/main/java/org/zstack/header/tpm/TpmErrors.java new file mode 100644 index 00000000000..cb9eebb68c2 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/TpmErrors.java @@ -0,0 +1,22 @@ +package org.zstack.header.tpm; + +public enum TpmErrors { + GENERAL_ERROR(1000), + + // INVALID_ARGUMENT, 17xx <- SYS.1007 + TPM_ALREADY_EXISTS(1701), + TPM_NOT_FOUND(1702), + VM_STATE_ERROR(1703), + ; + + private String code; + + private TpmErrors(int id) { + code = String.format("TPM.%s", id); + } + + @Override + public String toString() { + return code; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmEvent.java b/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmEvent.java new file mode 100644 index 00000000000..4106a67afdd --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmEvent.java @@ -0,0 +1,31 @@ +package org.zstack.header.tpm.api; + +import org.zstack.header.message.APIEvent; +import org.zstack.header.rest.RestResponse; +import org.zstack.header.tpm.entity.TpmInventory; + +@RestResponse(allTo = "inventory") +public class APIAddTpmEvent extends APIEvent { + private TpmInventory inventory; + + public APIAddTpmEvent() { + } + + public APIAddTpmEvent(String apiId) { + super(apiId); + } + + public TpmInventory getInventory() { + return inventory; + } + + public void setInventory(TpmInventory inventory) { + this.inventory = inventory; + } + + public static APIAddTpmEvent __example__() { + APIAddTpmEvent event = new APIAddTpmEvent(); + event.setInventory(TpmInventory.__example__()); + return event; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmEventDoc_zh_cn.groovy 
b/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmEventDoc_zh_cn.groovy new file mode 100644 index 00000000000..31d912bd097 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmEventDoc_zh_cn.groovy @@ -0,0 +1,32 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.entity.TpmInventory +import org.zstack.header.errorcode.ErrorCode + +doc { + + title "虚拟机添加 TPM 的结果" + + ref { + name "inventory" + path "org.zstack.header.tpm.api.APIAddTpmEvent.inventory" + desc "TPM 信息" + type "TpmInventory" + since "5.0.0" + clz TpmInventory.class + } + field { + name "success" + desc "添加是否成功" + type "boolean" + since "5.0.0" + } + ref { + name "error" + path "org.zstack.header.tpm.api.APIAddTpmEvent.error" + desc "错误码,若不为 null,则表示操作失败, 操作成功时该字段为 null" + type "ErrorCode" + since "5.0.0" + clz ErrorCode.class + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmMsg.java b/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmMsg.java new file mode 100644 index 00000000000..3acea51c0fc --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmMsg.java @@ -0,0 +1,51 @@ +package org.zstack.header.tpm.api; + +import org.springframework.http.HttpMethod; +import org.zstack.header.message.APICreateMessage; +import org.zstack.header.message.APIParam; +import org.zstack.header.message.DocUtils; +import org.zstack.header.rest.RestRequest; +import org.zstack.header.vm.VmInstanceMessage; +import org.zstack.header.vm.VmInstanceVO; +import org.zstack.utils.StringDSL; + +@RestRequest( + path = "/tpms", + method = HttpMethod.POST, + responseClass = APIAddTpmEvent.class, + parameterName = "params" +) +public class APIAddTpmMsg extends APICreateMessage implements VmInstanceMessage { + /** + * If null, use the default key provider from global config (if set). 
+ */ + @APIParam(required = false, minLength = 32, maxLength = 32) + private String keyProviderUuid; + + @APIParam(resourceType = VmInstanceVO.class) + private String vmInstanceUuid; + + public String getKeyProviderUuid() { + return keyProviderUuid; + } + + public void setKeyProviderUuid(String keyProviderUuid) { + this.keyProviderUuid = keyProviderUuid; + } + + @Override + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public static APIAddTpmMsg __example__() { + APIAddTpmMsg msg = new APIAddTpmMsg(); + msg.setKeyProviderUuid(StringDSL.createFixedUuid("keyProviderUuid")); + msg.setVmInstanceUuid(DocUtils.createFixedUuid(VmInstanceVO.class)); + return msg; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmMsgDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmMsgDoc_zh_cn.groovy new file mode 100644 index 00000000000..465d0ddd7c5 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIAddTpmMsgDoc_zh_cn.groovy @@ -0,0 +1,85 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.api.APIAddTpmEvent + +doc { + title "AddTpm" + + category "tpm" + + desc """虚拟机添加 TPM""" + + rest { + request { + url "POST /v1/tpms" + + header (Authorization: 'OAuth the-session-uuid') + + clz APIAddTpmMsg.class + + desc """""" + + params { + + column { + name "keyProviderUuid" + enclosedIn "params" + desc "密钥提供程序 UUID" + location "body" + type "String" + optional true + since "5.0.0" + } + column { + name "vmInstanceUuid" + enclosedIn "params" + desc "虚拟机 UUID" + location "body" + type "String" + optional false + since "5.0.0" + } + column { + name "resourceUuid" + enclosedIn "params" + desc "资源 UUID" + location "body" + type "String" + optional true + since "5.0.0" + } + column { + name "tagUuids" + enclosedIn "params" + desc "标签 UUID 列表" + location "body" + type "List" + optional true + 
since "5.0.0" + } + column { + name "systemTags" + enclosedIn "" + desc "系统标签" + location "body" + type "List" + optional true + since "5.0.0" + } + column { + name "userTags" + enclosedIn "" + desc "用户标签" + location "body" + type "List" + optional true + since "5.0.0" + } + } + } + + response { + clz APIAddTpmEvent.class + } + } +} \ No newline at end of file diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityMsg.java b/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityMsg.java new file mode 100644 index 00000000000..c687bd0e576 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityMsg.java @@ -0,0 +1,48 @@ +package org.zstack.header.tpm.api; + +import org.springframework.http.HttpMethod; +import org.zstack.header.message.APIParam; +import org.zstack.header.message.APISyncCallMessage; +import org.zstack.header.message.DocUtils; +import org.zstack.header.rest.RestRequest; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.vm.VmInstanceVO; + +@RestRequest( + path = "/tpms/capability", + method = HttpMethod.GET, + responseClass = APIGetTpmCapabilityReply.class +) +public class APIGetTpmCapabilityMsg extends APISyncCallMessage implements TpmMessage { + @APIParam(required = false, resourceType = TpmVO.class) + private String tpmUuid; + + @APIParam(required = false, resourceType = VmInstanceVO.class) + private String vmInstanceUuid; + + @Override + public String getTpmUuid() { + return tpmUuid; + } + + @Override + public void setTpmUuid(String tpmUuid) { + this.tpmUuid = tpmUuid; + } + + @Override + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + @Override + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public static APIGetTpmCapabilityMsg __example__() { + APIGetTpmCapabilityMsg msg = new APIGetTpmCapabilityMsg(); + msg.setVmInstanceUuid(DocUtils.createFixedUuid(VmInstanceVO.class)); + return msg; + 
} +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityMsgDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityMsgDoc_zh_cn.groovy new file mode 100644 index 00000000000..d5d3bb770aa --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityMsgDoc_zh_cn.groovy @@ -0,0 +1,67 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.api.APIGetTpmCapabilityReply + +doc { + title "GetTpmCapability" + + category "tpm" + + desc """获取 TPM 详情数据""" + + rest { + request { + url "GET /v1/tpms/capability" + + header (Authorization: 'OAuth the-session-uuid') + + clz APIGetTpmCapabilityMsg.class + + desc """""" + + params { + + column { + name "tpmUuid" + enclosedIn "" + desc "TPM UUID" + location "query" + type "String" + optional true + since "5.0.0" + } + column { + name "vmInstanceUuid" + enclosedIn "" + desc "虚拟机 UUID" + location "query" + type "String" + optional true + since "5.0.0" + } + column { + name "systemTags" + enclosedIn "" + desc "系统标签" + location "query" + type "List" + optional true + since "5.0.0" + } + column { + name "userTags" + enclosedIn "" + desc "用户标签" + location "query" + type "List" + optional true + since "5.0.0" + } + } + } + + response { + clz APIGetTpmCapabilityReply.class + } + } +} \ No newline at end of file diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityReply.java b/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityReply.java new file mode 100644 index 00000000000..4819ccfc1aa --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityReply.java @@ -0,0 +1,24 @@ +package org.zstack.header.tpm.api; + +import org.zstack.header.message.APIReply; +import org.zstack.header.rest.RestResponse; +import org.zstack.header.tpm.entity.TpmCapabilityView; + +@RestResponse(fieldsTo = "all") +public class APIGetTpmCapabilityReply extends APIReply { + private TpmCapabilityView 
inventory; + + public TpmCapabilityView getInventory() { + return inventory; + } + + public void setInventory(TpmCapabilityView inventory) { + this.inventory = inventory; + } + + public static APIGetTpmCapabilityReply __example__() { + APIGetTpmCapabilityReply reply = new APIGetTpmCapabilityReply(); + reply.setInventory(TpmCapabilityView.__example__()); + return reply; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityReplyDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityReplyDoc_zh_cn.groovy new file mode 100644 index 00000000000..216b1ba86b8 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIGetTpmCapabilityReplyDoc_zh_cn.groovy @@ -0,0 +1,32 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.entity.TpmCapabilityView +import org.zstack.header.errorcode.ErrorCode + +doc { + + title "获取 TPM 详情数据的结果" + + ref { + name "inventory" + path "org.zstack.header.tpm.api.APIGetTpmCapabilityReply.inventory" + desc "TPM 性能和信息数据" + type "TpmCapabilityView" + since "5.0.0" + clz TpmCapabilityView.class + } + field { + name "success" + desc "获取是否成功" + type "boolean" + since "5.0.0" + } + ref { + name "error" + path "org.zstack.header.tpm.api.APIGetTpmCapabilityReply.error" + desc "错误码,若不为 null,则表示操作失败, 操作成功时该字段为 null" + type "ErrorCode" + since "5.0.0" + clz ErrorCode.class + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmMsg.java b/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmMsg.java new file mode 100644 index 00000000000..00730943d90 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmMsg.java @@ -0,0 +1,26 @@ +package org.zstack.header.tpm.api; + +import org.springframework.http.HttpMethod; +import org.zstack.header.message.DocUtils; +import org.zstack.header.query.APIQueryMessage; +import org.zstack.header.query.AutoQuery; +import org.zstack.header.rest.RestRequest; +import 
org.zstack.header.tpm.entity.TpmInventory; +import org.zstack.header.tpm.entity.TpmVO; + +import java.util.List; + +import static java.util.Arrays.asList; + +@AutoQuery(replyClass = APIQueryTpmReply.class, inventoryClass = TpmInventory.class) +@RestRequest( + path = "/tpms", + optionalPaths = {"/tpms/{uuid}"}, + method = HttpMethod.GET, + responseClass = APIQueryTpmReply.class +) +public class APIQueryTpmMsg extends APIQueryMessage { + public static List __example__() { + return asList("uuid=" + DocUtils.createFixedUuid(TpmVO.class)); + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmMsgDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmMsgDoc_zh_cn.groovy new file mode 100644 index 00000000000..2254f22e12f --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmMsgDoc_zh_cn.groovy @@ -0,0 +1,31 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.api.APIQueryTpmReply +import org.zstack.header.query.APIQueryMessage + +doc { + title "QueryTpm" + + category "tpm" + + desc """查询 TPM""" + + rest { + request { + url "GET /v1/tpms" + url "GET /v1/tpms/{uuid}" + + header (Authorization: 'OAuth the-session-uuid') + + clz APIQueryTpmMsg.class + + desc """""" + + params APIQueryMessage.class + } + + response { + clz APIQueryTpmReply.class + } + } +} \ No newline at end of file diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmReply.java b/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmReply.java new file mode 100644 index 00000000000..9262fd88081 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmReply.java @@ -0,0 +1,28 @@ +package org.zstack.header.tpm.api; + +import org.zstack.header.query.APIQueryReply; +import org.zstack.header.rest.RestResponse; +import org.zstack.header.tpm.entity.TpmInventory; + +import java.util.List; + +import static org.zstack.utils.CollectionDSL.list; + +@RestResponse(allTo = "inventories") 
+public class APIQueryTpmReply extends APIQueryReply { + private List inventories; + + public List getInventories() { + return inventories; + } + + public void setInventories(List inventories) { + this.inventories = inventories; + } + + public static APIQueryTpmReply __example__() { + APIQueryTpmReply reply = new APIQueryTpmReply(); + reply.setInventories(list(TpmInventory.__example__())); + return reply; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmReplyDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmReplyDoc_zh_cn.groovy new file mode 100644 index 00000000000..bff2c89f924 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIQueryTpmReplyDoc_zh_cn.groovy @@ -0,0 +1,32 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.entity.TpmInventory +import org.zstack.header.errorcode.ErrorCode + +doc { + + title "查询 TPM 的结果" + + ref { + name "inventories" + path "org.zstack.header.tpm.api.APIQueryTpmReply.inventories" + desc "TPM 列表" + type "List" + since "5.0.0" + clz TpmInventory.class + } + field { + name "success" + desc "查询是否成功" + type "boolean" + since "5.0.0" + } + ref { + name "error" + path "org.zstack.header.tpm.api.APIQueryTpmReply.error" + desc "错误码,若不为 null,则表示操作失败, 操作成功时该字段为 null" + type "ErrorCode" + since "5.0.0" + clz ErrorCode.class + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmEvent.java b/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmEvent.java new file mode 100644 index 00000000000..7356c2aafa4 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmEvent.java @@ -0,0 +1,19 @@ +package org.zstack.header.tpm.api; + +import org.zstack.header.message.APIEvent; +import org.zstack.header.rest.RestResponse; + +@RestResponse +public class APIRemoveTpmEvent extends APIEvent { + public APIRemoveTpmEvent(String apiId) { + super(apiId); + } + + public APIRemoveTpmEvent() { + super(null); + } + + 
public static APIRemoveTpmEvent __example__() { + return new APIRemoveTpmEvent(); + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmEventDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmEventDoc_zh_cn.groovy new file mode 100644 index 00000000000..bd77dfe1d5f --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmEventDoc_zh_cn.groovy @@ -0,0 +1,23 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.errorcode.ErrorCode + +doc { + + title "虚拟机删除 TPM 的结果" + + field { + name "success" + desc "删除是否成功" + type "boolean" + since "5.0.0" + } + ref { + name "error" + path "org.zstack.header.tpm.api.APIRemoveTpmEvent.error" + desc "错误码,若不为 null,则表示操作失败, 操作成功时该字段为 null" + type "ErrorCode" + since "5.0.0" + clz ErrorCode.class + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmMsg.java b/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmMsg.java new file mode 100644 index 00000000000..5b5bc148e09 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmMsg.java @@ -0,0 +1,49 @@ +package org.zstack.header.tpm.api; + +import org.springframework.http.HttpMethod; +import org.zstack.header.message.APIDeleteMessage; +import org.zstack.header.message.APIParam; +import org.zstack.header.message.DocUtils; +import org.zstack.header.rest.RestRequest; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.vm.VmInstanceMessage; +import org.zstack.header.vm.VmInstanceVO; + +@RestRequest( + path = "/tpms", + method = HttpMethod.DELETE, + responseClass = APIRemoveTpmEvent.class +) +public class APIRemoveTpmMsg extends APIDeleteMessage implements VmInstanceMessage, TpmMessage { + @APIParam(required = false, resourceType = TpmVO.class, successIfResourceNotExisting = true) + private String tpmUuid; + + @APIParam(required = false, resourceType = VmInstanceVO.class) + private String vmInstanceUuid; + + @Override + public String 
getTpmUuid() { + return tpmUuid; + } + + @Override + public void setTpmUuid(String tpmUuid) { + this.tpmUuid = tpmUuid; + } + + @Override + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + @Override + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public static APIRemoveTpmMsg __example__() { + APIRemoveTpmMsg msg = new APIRemoveTpmMsg(); + msg.setVmInstanceUuid(DocUtils.createFixedUuid(VmInstanceVO.class)); + return msg; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmMsgDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmMsgDoc_zh_cn.groovy new file mode 100644 index 00000000000..c10c5e8526d --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIRemoveTpmMsgDoc_zh_cn.groovy @@ -0,0 +1,76 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.api.APIRemoveTpmEvent + +doc { + title "RemoveTpm" + + category "tpm" + + desc """虚拟机删除 TPM""" + + rest { + request { + url "DELETE /v1/tpms" + + header (Authorization: 'OAuth the-session-uuid') + + clz APIRemoveTpmMsg.class + + desc """""" + + params { + + column { + name "tpmUuid" + enclosedIn "" + desc "TPM UUID" + location "query" + type "String" + optional true + since "5.0.0" + } + column { + name "vmInstanceUuid" + enclosedIn "" + desc "虚拟机 UUID" + location "query" + type "String" + optional true + since "5.0.0" + } + column { + name "deleteMode" + enclosedIn "" + desc "删除模式(Permissive / Enforcing,Permissive)" + location "query" + type "String" + optional true + since "5.0.0" + } + column { + name "systemTags" + enclosedIn "" + desc "系统标签" + location "query" + type "List" + optional true + since "5.0.0" + } + column { + name "userTags" + enclosedIn "" + desc "用户标签" + location "query" + type "List" + optional true + since "5.0.0" + } + } + } + + response { + clz APIRemoveTpmEvent.class + } + } +} \ No newline at end of file diff --git 
a/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmEvent.java b/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmEvent.java new file mode 100644 index 00000000000..3d03410e053 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmEvent.java @@ -0,0 +1,31 @@ +package org.zstack.header.tpm.api; + +import org.zstack.header.message.APIEvent; +import org.zstack.header.rest.RestResponse; +import org.zstack.header.tpm.entity.TpmInventory; + +@RestResponse(allTo = "inventory") +public class APIUpdateTpmEvent extends APIEvent { + private TpmInventory inventory; + + public APIUpdateTpmEvent() { + } + + public APIUpdateTpmEvent(String apiId) { + super(apiId); + } + + public TpmInventory getInventory() { + return inventory; + } + + public void setInventory(TpmInventory inventory) { + this.inventory = inventory; + } + + public static APIUpdateTpmEvent __example__() { + APIUpdateTpmEvent event = new APIUpdateTpmEvent(); + event.setInventory(TpmInventory.__example__()); + return event; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmEventDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmEventDoc_zh_cn.groovy new file mode 100644 index 00000000000..33f2e7dab69 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmEventDoc_zh_cn.groovy @@ -0,0 +1,32 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.entity.TpmInventory +import org.zstack.header.errorcode.ErrorCode + +doc { + + title "更新 TPM 的结果" + + ref { + name "inventory" + path "org.zstack.header.tpm.api.APIUpdateTpmEvent.inventory" + desc "更新后的 TPM 信息" + type "TpmInventory" + since "5.0.0" + clz TpmInventory.class + } + field { + name "success" + desc "更新是否成功" + type "boolean" + since "5.0.0" + } + ref { + name "error" + path "org.zstack.header.tpm.api.APIUpdateTpmEvent.error" + desc "错误码,若不为 null,则表示操作失败, 操作成功时该字段为 null" + type "ErrorCode" + since "5.0.0" + clz ErrorCode.class + } 
+} diff --git a/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmMsg.java b/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmMsg.java new file mode 100644 index 00000000000..4e3f264d3c3 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmMsg.java @@ -0,0 +1,62 @@ +package org.zstack.header.tpm.api; + +import org.springframework.http.HttpMethod; +import org.zstack.header.message.APIMessage; +import org.zstack.header.message.APIParam; +import org.zstack.header.message.DocUtils; +import org.zstack.header.rest.RestRequest; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.vm.VmInstanceVO; +import org.zstack.utils.StringDSL; + +@RestRequest( + path = "/tpms", + method = HttpMethod.PUT, + isAction = true, + responseClass = APIUpdateTpmEvent.class +) +public class APIUpdateTpmMsg extends APIMessage implements TpmMessage { + @APIParam(required = false, resourceType = VmInstanceVO.class) + private String vmInstanceUuid; + + @APIParam(required = false, resourceType = TpmVO.class) + private String tpmUuid; + + @APIParam(required = false, minLength = 32, maxLength = 32) + private String keyProviderUuid; + + @Override + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + @Override + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + @Override + public String getTpmUuid() { + return tpmUuid; + } + + @Override + public void setTpmUuid(String tpmUuid) { + this.tpmUuid = tpmUuid; + } + + public String getKeyProviderUuid() { + return keyProviderUuid; + } + + public void setKeyProviderUuid(String keyProviderUuid) { + this.keyProviderUuid = keyProviderUuid; + } + + public static APIUpdateTpmMsg __example__() { + APIUpdateTpmMsg msg = new APIUpdateTpmMsg(); + msg.setKeyProviderUuid(StringDSL.createFixedUuid("keyProviderUuid")); + msg.setVmInstanceUuid(DocUtils.createFixedUuid(VmInstanceVO.class)); + return msg; + } +} diff --git 
a/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmMsgDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmMsgDoc_zh_cn.groovy new file mode 100644 index 00000000000..4147d9b6554 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/APIUpdateTpmMsgDoc_zh_cn.groovy @@ -0,0 +1,76 @@ +package org.zstack.header.tpm.api + +import org.zstack.header.tpm.api.APIUpdateTpmEvent + +doc { + title "UpdateTpm" + + category "tpm" + + desc """更新 TPM""" + + rest { + request { + url "PUT /v1/tpms" + + header (Authorization: 'OAuth the-session-uuid') + + clz APIUpdateTpmMsg.class + + desc """""" + + params { + + column { + name "vmInstanceUuid" + enclosedIn "updateTpm" + desc "虚拟机 UUID" + location "body" + type "String" + optional true + since "5.0.0" + } + column { + name "tpmUuid" + enclosedIn "updateTpm" + desc "TPM UUID" + location "body" + type "String" + optional true + since "5.0.0" + } + column { + name "keyProviderUuid" + enclosedIn "updateTpm" + desc "密钥提供程序 UUID" + location "body" + type "String" + optional true + since "5.0.0" + } + column { + name "systemTags" + enclosedIn "" + desc "系统标签" + location "body" + type "List" + optional true + since "5.0.0" + } + column { + name "userTags" + enclosedIn "" + desc "用户标签" + location "body" + type "List" + optional true + since "5.0.0" + } + } + } + + response { + clz APIUpdateTpmEvent.class + } + } +} \ No newline at end of file diff --git a/header/src/main/java/org/zstack/header/tpm/api/TpmMessage.java b/header/src/main/java/org/zstack/header/tpm/api/TpmMessage.java new file mode 100644 index 00000000000..1ae70c46381 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/api/TpmMessage.java @@ -0,0 +1,8 @@ +package org.zstack.header.tpm.api; + +public interface TpmMessage { + String getVmInstanceUuid(); + void setVmInstanceUuid(String vmInstanceUuid); + String getTpmUuid(); + void setTpmUuid(String tpmUuid); +} diff --git 
a/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityView.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityView.java new file mode 100644 index 00000000000..2b60316df52 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityView.java @@ -0,0 +1,120 @@ +package org.zstack.header.tpm.entity; + +import org.zstack.header.configuration.PythonClass; + +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.List; + +@PythonClass +public class TpmCapabilityView { + // fields in TpmInventory + private String uuid; + private String name; + private String vmInstanceUuid; + private Timestamp createDate; + private Timestamp lastOpDate; + private List hostRefs; + + // related table fields + // TODO keyProviderUuid / keyProviderType / keyProviderName / keyProviderKeyVersion + + // status fields : from system tags + private String edkVersion; + private String swtpmVersion; + + // config fields : from global / resource config + private boolean resetTpmAfterVmCloneConfig; + + public void setTpmInventory(TpmInventory inventory) { + setUuid(inventory.getUuid()); + setName(inventory.getName()); + setVmInstanceUuid(inventory.getVmInstanceUuid()); + setCreateDate(inventory.getCreateDate()); + setLastOpDate(inventory.getLastOpDate()); + setHostRefs(new ArrayList<>(inventory.getHostRefs())); + } + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public 
void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + public List getHostRefs() { + return hostRefs; + } + + public void setHostRefs(List hostRefs) { + this.hostRefs = hostRefs; + } + + public String getEdkVersion() { + return edkVersion; + } + + public void setEdkVersion(String edkVersion) { + this.edkVersion = edkVersion; + } + + public String getSwtpmVersion() { + return swtpmVersion; + } + + public void setSwtpmVersion(String swtpmVersion) { + this.swtpmVersion = swtpmVersion; + } + + public boolean isResetTpmAfterVmCloneConfig() { + return resetTpmAfterVmCloneConfig; + } + + public void setResetTpmAfterVmCloneConfig(boolean resetTpmAfterVmCloneConfig) { + this.resetTpmAfterVmCloneConfig = resetTpmAfterVmCloneConfig; + } + + public static TpmCapabilityView __example__() { + TpmCapabilityView view = new TpmCapabilityView(); + view.setTpmInventory(TpmInventory.__example__()); + + view.setEdkVersion("edk2-ovmf-20220126gitbb1bba3d77-3.el8.noarch"); + view.setSwtpmVersion("0.8.2"); + + view.setResetTpmAfterVmCloneConfig(true); + return view; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityViewDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityViewDoc_zh_cn.groovy new file mode 100644 index 00000000000..a286e9d74c7 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityViewDoc_zh_cn.groovy @@ -0,0 +1,66 @@ +package org.zstack.header.tpm.entity + +import java.sql.Timestamp +import org.zstack.header.tpm.entity.TpmHostRefInventory + +doc { + + title "TPM 详情" + + field { + name "uuid" + desc "TPM UUID" + type "String" + since "5.0.0" + } + field { + name "name" + desc "TPM 资源名称" + type "String" + since "5.0.0" + } + field { + name "vmInstanceUuid" + desc "虚拟机 UUID" + type "String" + since "5.0.0" + } + field { + name "createDate" + desc "创建时间" + type "Timestamp" + since "5.0.0" + } + field { + name "lastOpDate" + desc "最后一次修改时间" + type "Timestamp" + 
since "5.0.0" + } + ref { + name "hostRefs" + path "org.zstack.header.tpm.entity.TpmCapabilityView.hostRefs" + desc "TPM 与主机的相关数据列表" + type "List" + since "5.0.0" + clz TpmHostRefInventory.class + } + field { + name "edkVersion" + desc "EDK 套件版本" + type "String" + since "5.0.0" + } + field { + name "swtpmVersion" + desc "SWTPM 版本" + type "String" + since "5.0.0" + } + field { + name "resetTpmAfterVmCloneConfig" + desc "是否在虚拟机克隆后重置 TPM 状态的配置" + type "boolean" + since "5.0.0" + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventory.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventory.java new file mode 100644 index 00000000000..3253723c0d0 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventory.java @@ -0,0 +1,106 @@ +package org.zstack.header.tpm.entity; + +import org.zstack.header.host.HostInventory; +import org.zstack.header.host.HostVO; +import org.zstack.header.message.DocUtils; +import org.zstack.header.query.ExpandedQueries; +import org.zstack.header.query.ExpandedQuery; +import org.zstack.header.search.Inventory; + +import java.sql.Timestamp; +import java.util.Collection; +import java.util.List; + +import static org.zstack.utils.CollectionUtils.transform; + +@Inventory(mappingVOClass = TpmHostRefVO.class) +@ExpandedQueries({ + @ExpandedQuery(expandedField = "tpm", inventoryClass = TpmInventory.class, + foreignKey = "tpmUuid", expandedInventoryKey = "uuid"), + @ExpandedQuery(expandedField = "host", inventoryClass = HostInventory.class, + foreignKey = "hostUuid", expandedInventoryKey = "uuid"), +}) +public class TpmHostRefInventory { + private long id; + private String tpmUuid; + private String hostUuid; + private String path; + private Timestamp createDate; + private Timestamp lastOpDate; + + public TpmHostRefInventory() { + } + + public static TpmHostRefInventory valueOf(TpmHostRefVO vo) { + TpmHostRefInventory inv = new TpmHostRefInventory(); + inv.setId(vo.getId()); + 
inv.setTpmUuid(vo.getTpmUuid()); + inv.setHostUuid(vo.getHostUuid()); + inv.setPath(vo.getPath()); + inv.setCreateDate(vo.getCreateDate()); + inv.setLastOpDate(vo.getLastOpDate()); + return inv; + } + + public static List valueOf(Collection vos) { + return transform(vos, TpmHostRefInventory::valueOf); + } + + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } + + public String getTpmUuid() { + return tpmUuid; + } + + public void setTpmUuid(String tpmUuid) { + this.tpmUuid = tpmUuid; + } + + public String getHostUuid() { + return hostUuid; + } + + public void setHostUuid(String hostUuid) { + this.hostUuid = hostUuid; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + public static TpmHostRefInventory __example__() { + TpmHostRefInventory ref = new TpmHostRefInventory(); + ref.setId(1L); + ref.setTpmUuid(DocUtils.createFixedUuid(TpmVO.class)); + ref.setHostUuid(DocUtils.createFixedUuid(HostVO.class)); + ref.setCreateDate(DocUtils.timestamp()); + ref.setLastOpDate(DocUtils.timestamp()); + return ref; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventoryDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventoryDoc_zh_cn.groovy new file mode 100644 index 00000000000..d8330cb7ff6 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventoryDoc_zh_cn.groovy @@ -0,0 +1,45 @@ +package org.zstack.header.tpm.entity + +import java.sql.Timestamp + +doc { + + title "TPM 与主机的相关数据" + + field { + name "id" + desc "自增主键" + type "long" + since "5.0.0" + } + field { + name "tpmUuid" 
+ desc "TPM UUID" + type "String" + since "5.0.0" + } + field { + name "hostUuid" + desc "主机 UUID" + type "String" + since "5.0.0" + } + field { + name "path" + desc "遗留 TPM 状态文件的位置" + type "String" + since "5.0.0" + } + field { + name "createDate" + desc "创建时间" + type "Timestamp" + since "5.0.0" + } + field { + name "lastOpDate" + desc "最后一次修改时间" + type "Timestamp" + since "5.0.0" + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO.java new file mode 100644 index 00000000000..38bdbf5049f --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO.java @@ -0,0 +1,106 @@ +package org.zstack.header.tpm.entity; + +import org.zstack.header.host.HostVO; +import org.zstack.header.vo.EntityGraph; +import org.zstack.header.vo.ForeignKey; +import org.zstack.header.vo.ToInventory; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import java.sql.Timestamp; + +@Entity +@Table +@EntityGraph( + friends = { + @EntityGraph.Neighbour(type = TpmVO.class, myField = "tpmUuid", targetField = "uuid"), + @EntityGraph.Neighbour(type = HostVO.class, myField = "hostUuid", targetField = "uuid"), + } +) +public class TpmHostRefVO implements ToInventory { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column + private long id; + + @Column + @ForeignKey(parentEntityClass = TpmVO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + private String tpmUuid; + + @Column + @ForeignKey(parentEntityClass = HostVO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + private String hostUuid; + + @Column + private String path; + + @Column + private Timestamp createDate; + + @Column + private Timestamp lastOpDate; + + public long getId() { + return id; + } + + public void setId(long 
id) { + this.id = id; + } + + public String getTpmUuid() { + return tpmUuid; + } + + public void setTpmUuid(String tpmUuid) { + this.tpmUuid = tpmUuid; + } + + public String getHostUuid() { + return hostUuid; + } + + public void setHostUuid(String hostUuid) { + this.hostUuid = hostUuid; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + @Override + public String toString() { + return "TpmHostRefVO{" + + "id=" + id + + ", tpmUuid='" + tpmUuid + '\'' + + ", hostUuid='" + hostUuid + '\'' + + ", path='" + path + '\'' + + ", createDate=" + createDate + + ", lastOpDate=" + lastOpDate + + '}'; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO_.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO_.java new file mode 100644 index 00000000000..ee4d9654711 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO_.java @@ -0,0 +1,15 @@ +package org.zstack.header.tpm.entity; + +import javax.persistence.metamodel.SingularAttribute; +import javax.persistence.metamodel.StaticMetamodel; +import java.sql.Timestamp; + +@StaticMetamodel(TpmHostRefVO.class) +public class TpmHostRefVO_ { + public static volatile SingularAttribute id; + public static volatile SingularAttribute tpmUuid; + public static volatile SingularAttribute hostUuid; + public static volatile SingularAttribute path; + public static volatile SingularAttribute createDate; + public static volatile SingularAttribute lastOpDate; +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmInventory.java 
b/header/src/main/java/org/zstack/header/tpm/entity/TpmInventory.java new file mode 100644 index 00000000000..70ea376e643 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmInventory.java @@ -0,0 +1,110 @@ +package org.zstack.header.tpm.entity; + +import org.zstack.header.configuration.PythonClassInventory; +import org.zstack.header.message.DocUtils; +import org.zstack.header.query.ExpandedQueries; +import org.zstack.header.query.ExpandedQuery; +import org.zstack.header.search.Inventory; +import org.zstack.header.vm.VmInstanceInventory; +import org.zstack.header.vm.VmInstanceVO; + +import java.io.Serializable; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import static org.zstack.utils.CollectionDSL.list; +import static org.zstack.utils.CollectionUtils.transform; + +@PythonClassInventory +@Inventory(mappingVOClass = TpmVO.class) +@ExpandedQueries({ + @ExpandedQuery(expandedField = "vmInstance", inventoryClass = VmInstanceInventory.class, + foreignKey = "vmInstanceUuid", expandedInventoryKey = "uuid"), +}) +public class TpmInventory implements Serializable { + private String uuid; + private String name; + private String vmInstanceUuid; + private Timestamp createDate; + private Timestamp lastOpDate; + private List hostRefs = new ArrayList<>(); + + public TpmInventory() { + } + + public static TpmInventory valueOf(TpmVO vo) { + TpmInventory inv = new TpmInventory(); + inv.setUuid(vo.getUuid()); + inv.setName(vo.getResourceName()); + inv.setVmInstanceUuid(vo.getVmInstanceUuid()); + inv.setCreateDate(vo.getCreateDate()); + inv.setLastOpDate(vo.getLastOpDate()); + inv.setHostRefs(TpmHostRefInventory.valueOf(vo.getHostRefs())); + return inv; + } + + public static List valueOf(Collection vos) { + return transform(vos, TpmInventory::valueOf); + } + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public String getName() { 
+ return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + public List getHostRefs() { + return hostRefs; + } + + public void setHostRefs(List hostRefs) { + this.hostRefs = hostRefs; + } + + public static TpmInventory __example__() { + TpmInventory tpm = new TpmInventory(); + tpm.setUuid(DocUtils.createFixedUuid(TpmVO.class)); + tpm.setVmInstanceUuid(DocUtils.createFixedUuid(VmInstanceVO.class)); + tpm.setName("TPM-for-VM-" + tpm.getVmInstanceUuid()); + tpm.setCreateDate(DocUtils.timestamp()); + tpm.setLastOpDate(DocUtils.timestamp()); + tpm.setHostRefs(list(TpmHostRefInventory.__example__())); + return tpm; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmInventoryDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/entity/TpmInventoryDoc_zh_cn.groovy new file mode 100644 index 00000000000..9908023d6c4 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmInventoryDoc_zh_cn.groovy @@ -0,0 +1,48 @@ +package org.zstack.header.tpm.entity + +import java.sql.Timestamp +import org.zstack.header.tpm.entity.TpmHostRefInventory + +doc { + + title "TPM 信息" + + field { + name "uuid" + desc "TPM UUID" + type "String" + since "5.0.0" + } + field { + name "name" + desc "TPM 资源名称" + type "String" + since "5.0.0" + } + field { + name "vmInstanceUuid" + desc "虚拟机 UUID" + type "String" + since "5.0.0" + } + field { + name "createDate" + desc "创建时间" + type "Timestamp" + since "5.0.0" + } + field { + name "lastOpDate" + desc "最后一次修改时间" 
+ type "Timestamp" + since "5.0.0" + } + ref { + name "hostRefs" + path "org.zstack.header.tpm.entity.TpmInventory.hostRefs" + desc "TPM 与主机的相关数据列表" + type "List" + since "5.0.0" + clz TpmHostRefInventory.class + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmSpec.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmSpec.java new file mode 100644 index 00000000000..60efd9a9df9 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmSpec.java @@ -0,0 +1,30 @@ +package org.zstack.header.tpm.entity; + +import org.zstack.utils.StringDSL; + +public class TpmSpec { + private boolean enable = true; + private String keyProviderUuid; + + public boolean isEnable() { + return enable; + } + + public void setEnable(boolean enable) { + this.enable = enable; + } + + public String getKeyProviderUuid() { + return keyProviderUuid; + } + + public void setKeyProviderUuid(String keyProviderUuid) { + this.keyProviderUuid = keyProviderUuid; + } + + public static TpmSpec __example__() { + TpmSpec tpm = new TpmSpec(); + tpm.setKeyProviderUuid(StringDSL.createFixedUuid("keyProviderUuid")); + return tpm; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmVO.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmVO.java new file mode 100644 index 00000000000..02bbda43431 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmVO.java @@ -0,0 +1,110 @@ +package org.zstack.header.tpm.entity; + +import org.zstack.header.identity.OwnedByAccount; +import org.zstack.header.tag.AutoDeleteTag; +import org.zstack.header.vm.VmInstanceEO; +import org.zstack.header.vm.VmInstanceVO; +import org.zstack.header.vo.BaseResource; +import org.zstack.header.vo.EntityGraph; +import org.zstack.header.vo.ForeignKey; +import org.zstack.header.vo.NoView; +import org.zstack.header.vo.ResourceVO; +import org.zstack.header.vo.SoftDeletionCascade; +import org.zstack.header.vo.SoftDeletionCascades; +import 
org.zstack.header.vo.ToInventory; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.FetchType; +import javax.persistence.JoinColumn; +import javax.persistence.OneToMany; +import javax.persistence.Table; +import javax.persistence.Transient; +import java.sql.Timestamp; +import java.util.HashSet; +import java.util.Set; + +@Entity +@Table +@BaseResource +@AutoDeleteTag +@SoftDeletionCascades({ + @SoftDeletionCascade(parent = VmInstanceEO.class, joinColumn = "vmInstanceUuid") +}) +@EntityGraph( + parents = { + @EntityGraph.Neighbour(type = VmInstanceVO.class, myField = "vmInstanceUuid", targetField = "uuid"), + } +) +public class TpmVO extends ResourceVO implements ToInventory, OwnedByAccount { + @Column + @ForeignKey(parentEntityClass = VmInstanceEO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + private String vmInstanceUuid; + + @Column + private Timestamp createDate; + + @Column + private Timestamp lastOpDate; + + @Transient + private String accountUuid; + + @OneToMany(fetch = FetchType.EAGER) + @JoinColumn(name = "tpmUuid", insertable = false, updatable = false) + @NoView + private Set hostRefs = new HashSet<>(); + + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + @Override + public String getAccountUuid() { + return accountUuid; + } + + @Override + public void setAccountUuid(String accountUuid) { + this.accountUuid = accountUuid; + } + + public Set getHostRefs() { + return hostRefs; + } + + public void setHostRefs(Set hostRefs) { + this.hostRefs = hostRefs; + } + + public TpmVO() { 
+ } + + @Override + public String toString() { + return "TpmVO{" + + "vmInstanceUuid='" + vmInstanceUuid + '\'' + + ", uuid='" + uuid + '\'' + + '}'; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmVO_.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmVO_.java new file mode 100644 index 00000000000..99fb5aa3ba5 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmVO_.java @@ -0,0 +1,14 @@ +package org.zstack.header.tpm.entity; + +import org.zstack.header.vo.ResourceVO_; + +import javax.persistence.metamodel.SingularAttribute; +import javax.persistence.metamodel.StaticMetamodel; +import java.sql.Timestamp; + +@StaticMetamodel(TpmVO.class) +public class TpmVO_ extends ResourceVO_ { + public static volatile SingularAttribute vmInstanceUuid; + public static volatile SingularAttribute createDate; + public static volatile SingularAttribute lastOpDate; +} diff --git a/header/src/main/java/org/zstack/header/tpm/message/AddTpmMsg.java b/header/src/main/java/org/zstack/header/tpm/message/AddTpmMsg.java new file mode 100644 index 00000000000..2971d2b06f2 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/message/AddTpmMsg.java @@ -0,0 +1,45 @@ +package org.zstack.header.tpm.message; + +import org.zstack.header.message.NeedReplyMessage; +import org.zstack.header.tpm.api.APIAddTpmMsg; + +public class AddTpmMsg extends NeedReplyMessage { + private String keyProviderUuid; + private String vmInstanceUuid; + /** + * for creating TpmVO + */ + private String tpmUuid; + + public String getKeyProviderUuid() { + return keyProviderUuid; + } + + public void setKeyProviderUuid(String keyProviderUuid) { + this.keyProviderUuid = keyProviderUuid; + } + + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public String getTpmUuid() { + return tpmUuid; + } + + public void setTpmUuid(String tpmUuid) { + 
this.tpmUuid = tpmUuid; + } + + public static AddTpmMsg valueOf(APIAddTpmMsg api) { + AddTpmMsg msg = new AddTpmMsg(); + msg.setKeyProviderUuid(api.getKeyProviderUuid()); + msg.setVmInstanceUuid(api.getVmInstanceUuid()); + msg.setTpmUuid(api.getResourceUuid()); + return msg; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/message/AddTpmReply.java b/header/src/main/java/org/zstack/header/tpm/message/AddTpmReply.java new file mode 100644 index 00000000000..0cbcc9bf1e3 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/message/AddTpmReply.java @@ -0,0 +1,16 @@ +package org.zstack.header.tpm.message; + +import org.zstack.header.message.MessageReply; +import org.zstack.header.tpm.entity.TpmInventory; + +public class AddTpmReply extends MessageReply { + private TpmInventory inventory; + + public TpmInventory getInventory() { + return inventory; + } + + public void setInventory(TpmInventory inventory) { + this.inventory = inventory; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/message/RemoveTpmMsg.java b/header/src/main/java/org/zstack/header/tpm/message/RemoveTpmMsg.java new file mode 100644 index 00000000000..c68e16bc8d5 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/message/RemoveTpmMsg.java @@ -0,0 +1,32 @@ +package org.zstack.header.tpm.message; + +import org.zstack.header.message.NeedReplyMessage; +import org.zstack.header.tpm.api.APIRemoveTpmMsg; + +public class RemoveTpmMsg extends NeedReplyMessage { + private String tpmUuid; + private String vmInstanceUuid; + + public String getTpmUuid() { + return tpmUuid; + } + + public void setTpmUuid(String tpmUuid) { + this.tpmUuid = tpmUuid; + } + + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public static RemoveTpmMsg valueOf(APIRemoveTpmMsg api) { + RemoveTpmMsg msg = new RemoveTpmMsg(); + msg.setTpmUuid(api.getTpmUuid()); + 
msg.setVmInstanceUuid(api.getVmInstanceUuid()); + return msg; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/message/RemoveTpmReply.java b/header/src/main/java/org/zstack/header/tpm/message/RemoveTpmReply.java new file mode 100644 index 00000000000..ad2e3b13292 --- /dev/null +++ b/header/src/main/java/org/zstack/header/tpm/message/RemoveTpmReply.java @@ -0,0 +1,6 @@ +package org.zstack.header.tpm.message; + +import org.zstack.header.message.MessageReply; + +public class RemoveTpmReply extends MessageReply { +} diff --git a/header/src/main/java/org/zstack/header/vm/APIConvertTemplatedVmInstanceToVmInstanceMsg.java b/header/src/main/java/org/zstack/header/vm/APIConvertTemplatedVmInstanceToVmInstanceMsg.java index 1a128bfaf84..46e443c35cf 100644 --- a/header/src/main/java/org/zstack/header/vm/APIConvertTemplatedVmInstanceToVmInstanceMsg.java +++ b/header/src/main/java/org/zstack/header/vm/APIConvertTemplatedVmInstanceToVmInstanceMsg.java @@ -21,6 +21,9 @@ public class APIConvertTemplatedVmInstanceToVmInstanceMsg extends APIMessage imp @APIParam(maxLength = 255, required = true) private String name; + @APIParam(required = false) + private Boolean resetTpm; + @APINoSee private String vmInstanceUuid; @@ -40,6 +43,14 @@ public void setName(String name) { this.name = name; } + public Boolean getResetTpm() { + return resetTpm; + } + + public void setResetTpm(Boolean resetTpm) { + this.resetTpm = resetTpm; + } + @Override public String getVmInstanceUuid() { return vmInstanceUuid; diff --git a/header/src/main/java/org/zstack/header/vm/APIConvertTemplatedVmInstanceToVmInstanceMsgDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/vm/APIConvertTemplatedVmInstanceToVmInstanceMsgDoc_zh_cn.groovy index 554439a51d0..1f20e124b8c 100644 --- a/header/src/main/java/org/zstack/header/vm/APIConvertTemplatedVmInstanceToVmInstanceMsgDoc_zh_cn.groovy +++ b/header/src/main/java/org/zstack/header/vm/APIConvertTemplatedVmInstanceToVmInstanceMsgDoc_zh_cn.groovy @@ -57,6 
+57,15 @@ doc { optional true since "zsv 4.2.6" } + column { + name "resetTpm" + enclosedIn "params" + desc "转换成虚拟机后是否重置 TPM 状态" + location "body" + type "Boolean" + optional true + since "5.0.0" + } } } diff --git a/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceFromVolumeSnapshotGroupMsg.java b/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceFromVolumeSnapshotGroupMsg.java index 2d27e9b264f..94d63ca232a 100644 --- a/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceFromVolumeSnapshotGroupMsg.java +++ b/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceFromVolumeSnapshotGroupMsg.java @@ -115,6 +115,9 @@ public class APICreateVmInstanceFromVolumeSnapshotGroupMsg extends APICreateMess @APINoSee private String platform; + @APIParam(required = false) + private Boolean resetTpm; + public String getName() { return name; } @@ -292,4 +295,12 @@ public String getPlatform() { public void setPlatform(String platform) { this.platform = platform; } + + public Boolean getResetTpm() { + return resetTpm; + } + + public void setResetTpm(Boolean resetTpm) { + this.resetTpm = resetTpm; + } } diff --git a/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceFromVolumeSnapshotGroupMsgDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceFromVolumeSnapshotGroupMsgDoc_zh_cn.groovy index b2ba50497ec..e8262a13931 100644 --- a/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceFromVolumeSnapshotGroupMsgDoc_zh_cn.groovy +++ b/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceFromVolumeSnapshotGroupMsgDoc_zh_cn.groovy @@ -221,6 +221,15 @@ doc { optional true since "4.10.10" } + column { + name "resetTpm" + enclosedIn "params" + desc "是否重置 TPM 状态" + location "body" + type "Boolean" + optional true + since "5.0.0" + } } } diff --git a/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceMsg.java b/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceMsg.java index 
f6a924c477d..e085a0a0239 100755 --- a/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceMsg.java +++ b/header/src/main/java/org/zstack/header/vm/APICreateVmInstanceMsg.java @@ -10,11 +10,14 @@ import org.zstack.header.message.*; import org.zstack.header.network.l3.L3NetworkVO; import org.zstack.header.other.APIAuditor; +import org.zstack.header.rest.APINoSee; import org.zstack.header.rest.RestRequest; import org.zstack.header.storage.primary.PrimaryStorageVO; import org.zstack.header.tag.TagResourceType; +import org.zstack.header.vm.devices.VmDevicesSpec; import org.zstack.header.volume.VolumeVO; import org.zstack.header.zone.ZoneVO; +import org.zstack.utils.gson.JSONObjectUtil; import java.util.Collections; import java.util.List; @@ -225,6 +228,15 @@ public class APICreateVmInstanceMsg extends APICreateMessage implements APIAudit @APIParam(required = false) private List diskAOs; + @APIParam(required = false) + private Map devices; + + /** + * cache of {@link #devices} + */ + @APINoSee + private VmDevicesSpec devicesSpec; + public List getDiskAOs() { return diskAOs; } @@ -466,6 +478,26 @@ public void setAllocatorStrategy(String allocatorStrategy) { this.allocatorStrategy = allocatorStrategy; } + public Map getDevices() { + return devices; + } + + public void setDevices(Map devices) { + this.devices = devices; + } + + public VmDevicesSpec getDevicesSpec() { + if (devicesSpec == null && devices != null) { + devicesSpec = JSONObjectUtil.rehashObject(devices, VmDevicesSpec.class); + } + return devicesSpec; + } + + public void setDevicesSpec(VmDevicesSpec devicesSpec) { + this.devicesSpec = devicesSpec; + } + + @SuppressWarnings("unchecked") public static APICreateVmInstanceMsg __example__() { APICreateVmInstanceMsg msg = new APICreateVmInstanceMsg(); msg.setName("vm1"); @@ -489,6 +521,7 @@ public static APICreateVmInstanceMsg __example__() { disk2.setPrimaryStorageUuid(uuid(PrimaryStorageVO.class)); msg.setDiskAOs(list(disk1, disk2)); + 
msg.setDevices(JSONObjectUtil.rehashObject(VmDevicesSpec.__example__(), Map.class)); return msg; } diff --git a/header/src/main/java/org/zstack/header/vm/VmInstanceVO.java b/header/src/main/java/org/zstack/header/vm/VmInstanceVO.java index 0923c8e356a..5306142ee46 100755 --- a/header/src/main/java/org/zstack/header/vm/VmInstanceVO.java +++ b/header/src/main/java/org/zstack/header/vm/VmInstanceVO.java @@ -5,6 +5,7 @@ import org.zstack.header.host.HostVO; import org.zstack.header.identity.OwnedByAccount; import org.zstack.header.image.ImageVO; +import org.zstack.header.tpm.entity.TpmVO; import org.zstack.header.vm.cdrom.VmCdRomVO; import org.zstack.header.vo.*; import org.zstack.header.vo.EntityGraph; @@ -36,6 +37,7 @@ @EntityGraph.Neighbour(type = VolumeVO.class, myField = "rootVolumeUuid", targetField = "uuid"), @EntityGraph.Neighbour(type = VmNicVO.class, myField = "uuid", targetField = "vmInstanceUuid"), @EntityGraph.Neighbour(type = VmCdRomVO.class, myField = "uuid", targetField = "vmInstanceUuid"), + @EntityGraph.Neighbour(type = TpmVO.class, myField = "uuid", targetField = "vmInstanceUuid"), } ) public class VmInstanceVO extends VmInstanceAO implements OwnedByAccount, ToInventory { diff --git a/header/src/main/java/org/zstack/header/vm/devices/VmDevicesSpec.java b/header/src/main/java/org/zstack/header/vm/devices/VmDevicesSpec.java new file mode 100644 index 00000000000..6b24f045a8f --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/devices/VmDevicesSpec.java @@ -0,0 +1,24 @@ +package org.zstack.header.vm.devices; + +import org.zstack.header.tpm.entity.TpmSpec; + +public class VmDevicesSpec { + /** + * Default value of tpm must be null, because tpm.enable is true by default. 
+ */ + private TpmSpec tpm; + + public TpmSpec getTpm() { + return tpm; + } + + public void setTpm(TpmSpec tpm) { + this.tpm = tpm; + } + + public static VmDevicesSpec __example__() { + VmDevicesSpec spec = new VmDevicesSpec(); + spec.setTpm(TpmSpec.__example__()); + return spec; + } +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java index 6d1a7eaeb5c..4a724abac2a 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java @@ -159,4 +159,9 @@ public class KVMGlobalConfig { type = Long.class ) public static GlobalConfig KVMAGENT_PHYSICAL_MEMORY_USAGE_HARD_LIMIT = new GlobalConfig(CATEGORY, "kvmagent.physicalmemory.usage.hardlimit"); + + @GlobalConfigDef(defaultValue = "", + description = "Specify the EDK version to be used for the next VM startup. Default empty string indicates the use of the system's default EDK version") + @BindResourceConfig(value = {VmInstanceVO.class, ClusterVO.class}) + public static GlobalConfig VM_EDK_VERSION_CONFIG = new GlobalConfig(CATEGORY, "vm.edk.version"); } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMSystemTags.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMSystemTags.java index 63735015f57..8f0442f9246 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMSystemTags.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMSystemTags.java @@ -71,4 +71,8 @@ public class KVMSystemTags { public static final String EDK_RPM_TOKEN = "edkRpm"; public static PatternedSystemTag VM_EDK = new PatternedSystemTag(String.format("vm::edk::{%s}", EDK_RPM_TOKEN), VmInstanceVO.class); + + public static final String SWTPM_VERSION_TOKEN = "version"; + public static PatternedSystemTag SWTPM_VERSION = + new PatternedSystemTag(String.format("swtpm::{%s}", SWTPM_VERSION_TOKEN), HostVO.class); } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java 
b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java new file mode 100644 index 00000000000..2d643de746b --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java @@ -0,0 +1,374 @@ +package org.zstack.kvm.tpm; + +import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.core.cloudbus.CloudBus; +import org.zstack.core.cloudbus.CloudBusCallBack; +import org.zstack.core.cloudbus.MessageSafe; +import org.zstack.core.db.DatabaseFacade; +import org.zstack.core.db.Q; +import org.zstack.core.db.SQL; +import org.zstack.core.thread.ChainTask; +import org.zstack.core.thread.SyncTaskChain; +import org.zstack.core.thread.ThreadFacade; +import org.zstack.core.workflow.SimpleFlowChain; +import org.zstack.header.AbstractService; +import org.zstack.header.core.Completion; +import org.zstack.header.core.workflow.FlowDoneHandler; +import org.zstack.header.core.workflow.FlowErrorHandler; +import org.zstack.header.core.workflow.FlowTrigger; +import org.zstack.header.core.workflow.NoRollbackFlow; +import org.zstack.header.errorcode.ErrorCode; +import org.zstack.header.message.APIMessage; +import org.zstack.header.message.Message; +import org.zstack.header.message.MessageReply; +import org.zstack.header.tpm.api.APIAddTpmEvent; +import org.zstack.header.tpm.api.APIAddTpmMsg; +import org.zstack.header.tpm.api.APIGetTpmCapabilityMsg; +import org.zstack.header.tpm.api.APIGetTpmCapabilityReply; +import org.zstack.header.tpm.api.APIRemoveTpmEvent; +import org.zstack.header.tpm.api.APIRemoveTpmMsg; +import org.zstack.header.tpm.api.APIUpdateTpmMsg; +import org.zstack.header.tpm.entity.TpmCapabilityView; +import org.zstack.header.tpm.entity.TpmInventory; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.tpm.entity.TpmVO_; +import org.zstack.header.tpm.message.AddTpmMsg; +import org.zstack.header.tpm.message.AddTpmReply; +import org.zstack.header.tpm.message.RemoveTpmMsg; +import 
org.zstack.header.tpm.message.RemoveTpmReply; +import org.zstack.header.vm.VmInstanceVO; +import org.zstack.header.vm.VmInstanceVO_; +import org.zstack.resourceconfig.ResourceConfig; +import org.zstack.resourceconfig.ResourceConfigFacade; +import org.zstack.utils.Utils; +import org.zstack.utils.logging.CLogger; + +import java.util.Map; + +import static org.zstack.compute.vm.VmGlobalConfig.RESET_TPM_AFTER_VM_CLONE; +import static org.zstack.core.Platform.err; +import static org.zstack.header.errorcode.SysErrors.NOT_SUPPORTED; +import static org.zstack.header.tpm.TpmConstants.*; +import static org.zstack.header.tpm.TpmErrors.VM_STATE_ERROR; +import static org.zstack.kvm.KVMSystemTags.EDK_RPM_TOKEN; +import static org.zstack.kvm.KVMSystemTags.SWTPM_VERSION; +import static org.zstack.kvm.KVMSystemTags.SWTPM_VERSION_TOKEN; +import static org.zstack.kvm.KVMSystemTags.VM_EDK; + +public class KvmTpmManager extends AbstractService { + private static final CLogger logger = Utils.getLogger(KvmTpmManager.class); + + @Autowired + private CloudBus bus; + @Autowired + private ThreadFacade threadFacade; + @Autowired + private DatabaseFacade databaseFacade; + @Autowired + private ResourceConfigFacade resourceConfigFacade; + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } + + @Override + public String getId() { + return bus.makeLocalServiceId(SERVICE_ID); + } + + private String tpmQueueSyncSignature(String vmUuid) { + return String.format("tpm-queue-sync-%s", vmUuid); + } + + @MessageSafe + public void handleMessage(Message msg) { + if (msg instanceof APIMessage) { + handleApiMessage((APIMessage) msg); + } else { + handleLocalMessage(msg); + } + } + + private void handleLocalMessage(Message msg) { + if (msg instanceof AddTpmMsg) { + handle((AddTpmMsg) msg); + } else if (msg instanceof RemoveTpmMsg) { + handle((RemoveTpmMsg) msg); + } else { + bus.dealWithUnknownMessage(msg); + } + } + + private void 
handleApiMessage(APIMessage msg) { + if (msg instanceof APIGetTpmCapabilityMsg) { + handle((APIGetTpmCapabilityMsg) msg); + } else if (msg instanceof APIAddTpmMsg) { + handle((APIAddTpmMsg) msg); + } else if (msg instanceof APIRemoveTpmMsg) { + handle((APIRemoveTpmMsg) msg); + } else if (msg instanceof APIUpdateTpmMsg) { + handle((APIUpdateTpmMsg) msg); + } else { + bus.dealWithUnknownMessage(msg); + } + } + + private void handle(AddTpmMsg msg) { + AddTpmReply reply = new AddTpmReply(); + threadFacade.chainSubmit(new ChainTask(msg) { + @Override + public void run(SyncTaskChain chain) { + AddTpmToVmContext context = AddTpmToVmContext.valueOf(msg); + addTpmToVm(context, new Completion(chain, msg) { + @Override + public void success() { + chain.next(); + TpmVO vo = Q.New(TpmVO.class) + .eq(TpmVO_.uuid, msg.getTpmUuid()) + .find(); + reply.setInventory(TpmInventory.valueOf(vo)); + bus.reply(msg, reply); + } + + @Override + public void fail(ErrorCode errorCode) { + chain.next(); + reply.setError(errorCode); + bus.reply(msg, reply); + } + }); + } + + @Override + public String getSyncSignature() { + return tpmQueueSyncSignature(msg.getVmInstanceUuid()); + } + + @Override + public String getName() { + return "queue-of-add-tpm-to-vm-" + msg.getVmInstanceUuid(); + } + }); + } + + static class AddTpmToVmContext { + String keyProviderUuid; + String vmInstanceUuid; + String tpmUuid; + + static AddTpmToVmContext valueOf(AddTpmMsg msg) { + AddTpmToVmContext context = new AddTpmToVmContext(); + context.keyProviderUuid = msg.getKeyProviderUuid(); + context.vmInstanceUuid = msg.getVmInstanceUuid(); + context.tpmUuid = msg.getTpmUuid(); + return context; + } + } + + @SuppressWarnings("rawtypes") + private void addTpmToVm(AddTpmToVmContext context, Completion completion) { + SimpleFlowChain chain = new SimpleFlowChain(); + chain.setName("add-tpm-to-vm-" + context.vmInstanceUuid); + chain.then(new NoRollbackFlow() { + String __name__ = "check-vm-status"; + + @Override + public void 
run(FlowTrigger trigger, Map data) { + VmInstanceVO vm = Q.New(VmInstanceVO.class) + .eq(VmInstanceVO_.uuid, context.vmInstanceUuid) + .find(); + + if (!SUPPORT_VM_STATES_FOR_TPM_OPERATION.contains(vm.getState())) { + trigger.fail(err(VM_STATE_ERROR, + "The current VM state does not support adding TPM operations") + .withOpaque("support.vm.state", SUPPORT_VM_STATES_FOR_TPM_OPERATION)); + return; + } + trigger.next(); + } + }).then(new NoRollbackFlow() { + String __name__ = "create-tpm-db-records"; + + @Override + public void run(FlowTrigger trigger, Map data) { + TpmVO tpm = new TpmVO(); + tpm.setUuid(context.tpmUuid); + tpm.setResourceName("TPM-for-VM-" + context.vmInstanceUuid); + tpm.setVmInstanceUuid(context.vmInstanceUuid); + databaseFacade.persist(tpm); + trigger.next(); + } + }).done(new FlowDoneHandler(completion) { + @Override + public void handle(Map data) { + completion.success(); + } + }).error(new FlowErrorHandler(completion) { + @Override + public void handle(ErrorCode errorCode, Map data) { + completion.fail(errorCode); + } + }).start(); + } + + private void handle(RemoveTpmMsg msg) { + RemoveTpmReply reply = new RemoveTpmReply(); + threadFacade.chainSubmit(new ChainTask(msg) { + @Override + public void run(SyncTaskChain chain) { + RemoveTpmFromVmContext context = RemoveTpmFromVmContext.valueOf(msg); + removeTpmFromVm(context, new Completion(chain, msg) { + @Override + public void success() { + chain.next(); + bus.reply(msg, reply); + } + + @Override + public void fail(ErrorCode errorCode) { + chain.next(); + reply.setError(errorCode); + bus.reply(msg, reply); + } + }); + } + + @Override + public String getSyncSignature() { + return tpmQueueSyncSignature(msg.getVmInstanceUuid()); + } + + @Override + public String getName() { + return "queue-of-remove-tpm-from-vm-" + msg.getVmInstanceUuid(); + } + }); + } + + static class RemoveTpmFromVmContext { + String vmInstanceUuid; + String tpmUuid; + + static RemoveTpmFromVmContext valueOf(RemoveTpmMsg msg) { + 
RemoveTpmFromVmContext context = new RemoveTpmFromVmContext(); + context.vmInstanceUuid = msg.getVmInstanceUuid(); + context.tpmUuid = msg.getTpmUuid(); + return context; + } + } + + @SuppressWarnings("rawtypes") + private void removeTpmFromVm(RemoveTpmFromVmContext context, Completion completion) { + SimpleFlowChain chain = new SimpleFlowChain(); + chain.setName("remove-tpm-from-vm-" + context.vmInstanceUuid); + chain.then(new NoRollbackFlow() { + String __name__ = "check-vm-status"; + + @Override + public void run(FlowTrigger trigger, Map data) { + VmInstanceVO vm = Q.New(VmInstanceVO.class) + .eq(VmInstanceVO_.uuid, context.vmInstanceUuid) + .find(); + + if (!SUPPORT_VM_STATES_FOR_TPM_OPERATION.contains(vm.getState())) { + trigger.fail(err(VM_STATE_ERROR, + "The current VM state does not support removing TPM operations") + .withOpaque("support.vm.state", SUPPORT_VM_STATES_FOR_TPM_OPERATION)); + return; + } + trigger.next(); + } + }).then(new NoRollbackFlow() { + String __name__ = "remove-tpm-db-records"; + + @Override + public void run(FlowTrigger trigger, Map data) { + SQL.New(TpmVO.class) + .eq(TpmVO_.uuid, context.tpmUuid) + .delete(); + trigger.next(); + } + }).done(new FlowDoneHandler(completion) { + @Override + public void handle(Map data) { + completion.success(); + } + }).error(new FlowErrorHandler(completion) { + @Override + public void handle(ErrorCode errorCode, Map data) { + completion.fail(errorCode); + } + }).start(); + } + + private void handle(APIGetTpmCapabilityMsg msg) { + TpmCapabilityView view = new TpmCapabilityView(); + + final TpmVO tpm = Q.New(TpmVO.class) + .eq(TpmVO_.uuid, msg.getTpmUuid()) + .find(); + final VmInstanceVO vm = Q.New(VmInstanceVO.class) + .eq(VmInstanceVO_.uuid, tpm.getVmInstanceUuid()) + .find(); + + view.setTpmInventory(TpmInventory.valueOf(tpm)); + view.setEdkVersion(VM_EDK.getTokenByResourceUuid(vm.getUuid(), EDK_RPM_TOKEN)); + + if (vm.getHostUuid() != null) { + 
view.setSwtpmVersion(SWTPM_VERSION.getTokenByResourceUuid(vm.getHostUuid(), SWTPM_VERSION_TOKEN)); + } else if (vm.getLastHostUuid() != null) { + view.setSwtpmVersion(SWTPM_VERSION.getTokenByResourceUuid(vm.getLastHostUuid(), SWTPM_VERSION_TOKEN)); + } + + ResourceConfig resourceConfig = resourceConfigFacade.getResourceConfig(RESET_TPM_AFTER_VM_CLONE.getIdentity()); + view.setResetTpmAfterVmCloneConfig(resourceConfig.getResourceConfigValue(vm.getUuid(), Boolean.class)); + + APIGetTpmCapabilityReply reply = new APIGetTpmCapabilityReply(); + reply.setInventory(view); + bus.reply(msg, reply); + } + + private void handle(APIAddTpmMsg msg) { + APIAddTpmEvent event = new APIAddTpmEvent(msg.getId()); + + AddTpmMsg inner = AddTpmMsg.valueOf(msg); + bus.makeTargetServiceIdByResourceUuid(inner, SERVICE_ID, msg.getResourceUuid()); + bus.send(inner, new CloudBusCallBack(msg) { + @Override + public void run(MessageReply reply) { + if (reply.isSuccess()) { + event.setInventory(((AddTpmReply) reply.castReply()).getInventory()); + } else { + event.setError(reply.getError()); + } + bus.publish(event); + } + }); + } + + private void handle(APIRemoveTpmMsg msg) { + APIRemoveTpmEvent event = new APIRemoveTpmEvent(msg.getId()); + + RemoveTpmMsg inner = RemoveTpmMsg.valueOf(msg); + bus.makeTargetServiceIdByResourceUuid(inner, SERVICE_ID, msg.getTpmUuid()); + bus.send(inner, new CloudBusCallBack(msg) { + @Override + public void run(MessageReply reply) { + if (!reply.isSuccess()) { + event.setError(reply.getError()); + } + bus.publish(event); + } + }); + } + + private void handle(APIUpdateTpmMsg msg) { + throw err(NOT_SUPPORTED, "UpdateTpm is not supported in current version").toException(); + } +} diff --git a/sdk/src/main/java/SourceClassMap.java b/sdk/src/main/java/SourceClassMap.java index 146b132c459..821f401de47 100644 --- a/sdk/src/main/java/SourceClassMap.java +++ b/sdk/src/main/java/SourceClassMap.java @@ -270,6 +270,9 @@ public class SourceClassMap { 
put("org.zstack.header.tag.TagPatternInventory", "org.zstack.sdk.TagPatternInventory"); put("org.zstack.header.tag.TagPatternType", "org.zstack.sdk.TagPatternType"); put("org.zstack.header.tag.UserTagInventory", "org.zstack.sdk.UserTagInventory"); + put("org.zstack.header.tpm.entity.TpmCapabilityView", "org.zstack.sdk.tpm.entity.TpmCapabilityView"); + put("org.zstack.header.tpm.entity.TpmHostRefInventory", "org.zstack.sdk.tpm.entity.TpmHostRefInventory"); + put("org.zstack.header.tpm.entity.TpmInventory", "org.zstack.sdk.tpm.entity.TpmInventory"); put("org.zstack.header.vdpa.VmVdpaNicInventory", "org.zstack.sdk.VmVdpaNicInventory"); put("org.zstack.header.vipQos.VipQosInventory", "org.zstack.sdk.VipQosInventory"); put("org.zstack.header.vm.CloneVmInstanceInventory", "org.zstack.sdk.CloneVmInstanceInventory"); @@ -1270,6 +1273,9 @@ public class SourceClassMap { put("org.zstack.sdk.sns.platform.wecom.SNSWeComEndpointInventory", "org.zstack.sns.platform.wecom.SNSWeComEndpointInventory"); put("org.zstack.sdk.softwarePackage.header.JobDetails", "org.zstack.softwarePackage.header.JobDetails"); put("org.zstack.sdk.softwarePackage.header.SoftwarePackageInventory", "org.zstack.softwarePackage.header.SoftwarePackageInventory"); + put("org.zstack.sdk.tpm.entity.TpmCapabilityView", "org.zstack.header.tpm.entity.TpmCapabilityView"); + put("org.zstack.sdk.tpm.entity.TpmHostRefInventory", "org.zstack.header.tpm.entity.TpmHostRefInventory"); + put("org.zstack.sdk.tpm.entity.TpmInventory", "org.zstack.header.tpm.entity.TpmInventory"); put("org.zstack.sdk.zbox.ZBoxBackupInventory", "org.zstack.externalbackup.zbox.ZBoxBackupInventory"); put("org.zstack.sdk.zbox.ZBoxBackupStorageBackupInfo", "org.zstack.externalbackup.zbox.ZBoxBackupStorageBackupInfo"); put("org.zstack.sdk.zbox.ZBoxInventory", "org.zstack.zbox.ZBoxInventory"); diff --git a/sdk/src/main/java/org/zstack/heder/storage/volume/backup/CreateVmFromVolumeBackupAction.java 
b/sdk/src/main/java/org/zstack/heder/storage/volume/backup/CreateVmFromVolumeBackupAction.java index 1a56f081ab4..a1a9af07824 100644 --- a/sdk/src/main/java/org/zstack/heder/storage/volume/backup/CreateVmFromVolumeBackupAction.java +++ b/sdk/src/main/java/org/zstack/heder/storage/volume/backup/CreateVmFromVolumeBackupAction.java @@ -73,6 +73,9 @@ public Result throwExceptionIfError() { @Param(required = false, validValues = {"InstantStart","JustCreate","CreateStopped"}, nonempty = false, nullElements = false, emptyString = true, noTrim = false) public java.lang.String strategy = "InstantStart"; + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.Boolean resetTpm; + @Param(required = false) public java.lang.String resourceUuid; diff --git a/sdk/src/main/java/org/zstack/sdk/CloneVmInstanceAction.java b/sdk/src/main/java/org/zstack/sdk/CloneVmInstanceAction.java index 23ebc2033ca..661106961c5 100644 --- a/sdk/src/main/java/org/zstack/sdk/CloneVmInstanceAction.java +++ b/sdk/src/main/java/org/zstack/sdk/CloneVmInstanceAction.java @@ -67,6 +67,9 @@ public Result throwExceptionIfError() { @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) public org.zstack.sdk.VmCustomSpecificationStruct vmCustomSpecification; + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.Boolean resetTpm; + @Param(required = false) public java.util.List systemTags; diff --git a/sdk/src/main/java/org/zstack/sdk/ConvertTemplatedVmInstanceToVmInstanceAction.java b/sdk/src/main/java/org/zstack/sdk/ConvertTemplatedVmInstanceToVmInstanceAction.java index 1bc3dbc2656..5989a4c96d0 100644 --- a/sdk/src/main/java/org/zstack/sdk/ConvertTemplatedVmInstanceToVmInstanceAction.java +++ b/sdk/src/main/java/org/zstack/sdk/ConvertTemplatedVmInstanceToVmInstanceAction.java @@ -31,6 +31,9 @@ public Result throwExceptionIfError() { 
@Param(required = true, maxLength = 255, nonempty = false, nullElements = false, emptyString = true, noTrim = false) public java.lang.String name; + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.Boolean resetTpm; + @Param(required = false) public java.util.List systemTags; diff --git a/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceAction.java b/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceAction.java index 8c05a04e241..f475d3c4761 100644 --- a/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceAction.java +++ b/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceAction.java @@ -123,6 +123,9 @@ public Result throwExceptionIfError() { @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) public java.util.List diskAOs; + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.util.Map devices; + @Param(required = false) public java.lang.String resourceUuid; diff --git a/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceFromTemplatedVmInstanceAction.java b/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceFromTemplatedVmInstanceAction.java index 8994903a456..4e699ce5481 100644 --- a/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceFromTemplatedVmInstanceAction.java +++ b/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceFromTemplatedVmInstanceAction.java @@ -76,6 +76,9 @@ public Result throwExceptionIfError() { @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) public org.zstack.sdk.VmCustomSpecificationStruct vmCustomSpecification; + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.Boolean resetTpm; + @Param(required = false) public java.util.List systemTags; diff --git a/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceFromVolumeSnapshotGroupAction.java 
b/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceFromVolumeSnapshotGroupAction.java index 2eb0f1fa80c..7f477d9b0fb 100644 --- a/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceFromVolumeSnapshotGroupAction.java +++ b/sdk/src/main/java/org/zstack/sdk/CreateVmInstanceFromVolumeSnapshotGroupAction.java @@ -79,6 +79,9 @@ public Result throwExceptionIfError() { @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) public java.util.Map dataVolumeSystemTags; + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.Boolean resetTpm; + @Param(required = false) public java.lang.String resourceUuid; diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/AddTpmAction.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/AddTpmAction.java new file mode 100644 index 00000000000..63fdcb78673 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/AddTpmAction.java @@ -0,0 +1,110 @@ +package org.zstack.sdk.tpm.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class AddTpmAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.tpm.api.AddTpmResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = false, maxLength = 32, minLength = 32, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String keyProviderUuid; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String vmInstanceUuid; + + @Param(required = false) + 
public java.lang.String resourceUuid; + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.util.List tagUuids; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.tpm.api.AddTpmResult value = res.getResult(org.zstack.sdk.tpm.api.AddTpmResult.class); + ret.value = value == null ? new org.zstack.sdk.tpm.api.AddTpmResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "POST"; + info.path = "/tpms"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "params"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/AddTpmResult.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/AddTpmResult.java new file mode 100644 index 00000000000..05effc20779 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/AddTpmResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.tpm.api; + +import 
org.zstack.sdk.tpm.entity.TpmInventory; + +public class AddTpmResult { + public TpmInventory inventory; + public void setInventory(TpmInventory inventory) { + this.inventory = inventory; + } + public TpmInventory getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/GetTpmCapabilityAction.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/GetTpmCapabilityAction.java new file mode 100644 index 00000000000..d4a8c793040 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/GetTpmCapabilityAction.java @@ -0,0 +1,98 @@ +package org.zstack.sdk.tpm.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class GetTpmCapabilityAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.tpm.api.GetTpmCapabilityResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String tpmUuid; + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String vmInstanceUuid; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + 
ret.error = res.error; + return ret; + } + + org.zstack.sdk.tpm.api.GetTpmCapabilityResult value = res.getResult(org.zstack.sdk.tpm.api.GetTpmCapabilityResult.class); + ret.value = value == null ? new org.zstack.sdk.tpm.api.GetTpmCapabilityResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "GET"; + info.path = "/tpms/capability"; + info.needSession = true; + info.needPoll = false; + info.parameterName = ""; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/GetTpmCapabilityResult.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/GetTpmCapabilityResult.java new file mode 100644 index 00000000000..66d43f8a70d --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/GetTpmCapabilityResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.tpm.api; + +import org.zstack.sdk.tpm.entity.TpmCapabilityView; + +public class GetTpmCapabilityResult { + public TpmCapabilityView inventory; + public void setInventory(TpmCapabilityView inventory) { + this.inventory = inventory; + } + public TpmCapabilityView getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/QueryTpmAction.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/QueryTpmAction.java new file mode 100644 index 00000000000..e6b41993ebb --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/QueryTpmAction.java @@ -0,0 +1,75 @@ +package org.zstack.sdk.tpm.api; + +import java.util.HashMap; +import java.util.Map; +import 
org.zstack.sdk.*; + +public class QueryTpmAction extends QueryAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.tpm.api.QueryTpmResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.tpm.api.QueryTpmResult value = res.getResult(org.zstack.sdk.tpm.api.QueryTpmResult.class); + ret.value = value == null ? new org.zstack.sdk.tpm.api.QueryTpmResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "GET"; + info.path = "/tpms"; + info.needSession = true; + info.needPoll = false; + info.parameterName = ""; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/QueryTpmResult.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/QueryTpmResult.java new file mode 100644 index 00000000000..1414409e00a --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/QueryTpmResult.java @@ -0,0 +1,22 @@ +package org.zstack.sdk.tpm.api; + + + +public class QueryTpmResult { + public java.util.List inventories; + public void 
setInventories(java.util.List inventories) { + this.inventories = inventories; + } + public java.util.List getInventories() { + return this.inventories; + } + + public java.lang.Long total; + public void setTotal(java.lang.Long total) { + this.total = total; + } + public java.lang.Long getTotal() { + return this.total; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/RemoveTpmAction.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/RemoveTpmAction.java new file mode 100644 index 00000000000..a404cfe8df3 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/RemoveTpmAction.java @@ -0,0 +1,107 @@ +package org.zstack.sdk.tpm.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class RemoveTpmAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.tpm.api.RemoveTpmResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String tpmUuid; + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String vmInstanceUuid; + + @Param(required = false) + public java.lang.String deleteMode = "Permissive"; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String 
requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.tpm.api.RemoveTpmResult value = res.getResult(org.zstack.sdk.tpm.api.RemoveTpmResult.class); + ret.value = value == null ? new org.zstack.sdk.tpm.api.RemoveTpmResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "DELETE"; + info.path = "/tpms"; + info.needSession = true; + info.needPoll = true; + info.parameterName = ""; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/RemoveTpmResult.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/RemoveTpmResult.java new file mode 100644 index 00000000000..93f74261a83 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/RemoveTpmResult.java @@ -0,0 +1,7 @@ +package org.zstack.sdk.tpm.api; + + + +public class RemoveTpmResult { + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/UpdateTpmAction.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/UpdateTpmAction.java new file mode 100644 index 00000000000..28fd0257b14 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/UpdateTpmAction.java @@ -0,0 +1,107 @@ +package org.zstack.sdk.tpm.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class UpdateTpmAction extends AbstractAction { + + private static final 
HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.tpm.api.UpdateTpmResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String vmInstanceUuid; + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String tpmUuid; + + @Param(required = false, maxLength = 32, minLength = 32, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String keyProviderUuid; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.tpm.api.UpdateTpmResult value = res.getResult(org.zstack.sdk.tpm.api.UpdateTpmResult.class); + ret.value = value == null ? 
new org.zstack.sdk.tpm.api.UpdateTpmResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/tpms"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "updateTpm"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/api/UpdateTpmResult.java b/sdk/src/main/java/org/zstack/sdk/tpm/api/UpdateTpmResult.java new file mode 100644 index 00000000000..1157fd732cb --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/api/UpdateTpmResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.tpm.api; + +import org.zstack.sdk.tpm.entity.TpmInventory; + +public class UpdateTpmResult { + public TpmInventory inventory; + public void setInventory(TpmInventory inventory) { + this.inventory = inventory; + } + public TpmInventory getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmCapabilityView.java b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmCapabilityView.java new file mode 100644 index 00000000000..f77138cdfc7 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmCapabilityView.java @@ -0,0 +1,79 @@ +package org.zstack.sdk.tpm.entity; + + + +public class TpmCapabilityView { + + public java.lang.String uuid; + public void setUuid(java.lang.String uuid) { + this.uuid = uuid; + } + public java.lang.String getUuid() { + return this.uuid; + } + + public java.lang.String name; + public void setName(java.lang.String name) { + this.name = 
name; + } + public java.lang.String getName() { + return this.name; + } + + public java.lang.String vmInstanceUuid; + public void setVmInstanceUuid(java.lang.String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + public java.lang.String getVmInstanceUuid() { + return this.vmInstanceUuid; + } + + public java.sql.Timestamp createDate; + public void setCreateDate(java.sql.Timestamp createDate) { + this.createDate = createDate; + } + public java.sql.Timestamp getCreateDate() { + return this.createDate; + } + + public java.sql.Timestamp lastOpDate; + public void setLastOpDate(java.sql.Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + public java.sql.Timestamp getLastOpDate() { + return this.lastOpDate; + } + + public java.util.List hostRefs; + public void setHostRefs(java.util.List hostRefs) { + this.hostRefs = hostRefs; + } + public java.util.List getHostRefs() { + return this.hostRefs; + } + + public java.lang.String edkVersion; + public void setEdkVersion(java.lang.String edkVersion) { + this.edkVersion = edkVersion; + } + public java.lang.String getEdkVersion() { + return this.edkVersion; + } + + public java.lang.String swtpmVersion; + public void setSwtpmVersion(java.lang.String swtpmVersion) { + this.swtpmVersion = swtpmVersion; + } + public java.lang.String getSwtpmVersion() { + return this.swtpmVersion; + } + + public boolean resetTpmAfterVmCloneConfig; + public void setResetTpmAfterVmCloneConfig(boolean resetTpmAfterVmCloneConfig) { + this.resetTpmAfterVmCloneConfig = resetTpmAfterVmCloneConfig; + } + public boolean getResetTpmAfterVmCloneConfig() { + return this.resetTpmAfterVmCloneConfig; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmHostRefInventory.java b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmHostRefInventory.java new file mode 100644 index 00000000000..85db7ad9eb8 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmHostRefInventory.java @@ -0,0 +1,55 @@ +package 
org.zstack.sdk.tpm.entity; + + + +public class TpmHostRefInventory { + + public long id; + public void setId(long id) { + this.id = id; + } + public long getId() { + return this.id; + } + + public java.lang.String tpmUuid; + public void setTpmUuid(java.lang.String tpmUuid) { + this.tpmUuid = tpmUuid; + } + public java.lang.String getTpmUuid() { + return this.tpmUuid; + } + + public java.lang.String hostUuid; + public void setHostUuid(java.lang.String hostUuid) { + this.hostUuid = hostUuid; + } + public java.lang.String getHostUuid() { + return this.hostUuid; + } + + public java.lang.String path; + public void setPath(java.lang.String path) { + this.path = path; + } + public java.lang.String getPath() { + return this.path; + } + + public java.sql.Timestamp createDate; + public void setCreateDate(java.sql.Timestamp createDate) { + this.createDate = createDate; + } + public java.sql.Timestamp getCreateDate() { + return this.createDate; + } + + public java.sql.Timestamp lastOpDate; + public void setLastOpDate(java.sql.Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + public java.sql.Timestamp getLastOpDate() { + return this.lastOpDate; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmInventory.java b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmInventory.java new file mode 100644 index 00000000000..e4fa21a0746 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmInventory.java @@ -0,0 +1,55 @@ +package org.zstack.sdk.tpm.entity; + + + +public class TpmInventory { + + public java.lang.String uuid; + public void setUuid(java.lang.String uuid) { + this.uuid = uuid; + } + public java.lang.String getUuid() { + return this.uuid; + } + + public java.lang.String name; + public void setName(java.lang.String name) { + this.name = name; + } + public java.lang.String getName() { + return this.name; + } + + public java.lang.String vmInstanceUuid; + public void setVmInstanceUuid(java.lang.String vmInstanceUuid) { + this.vmInstanceUuid 
= vmInstanceUuid; + } + public java.lang.String getVmInstanceUuid() { + return this.vmInstanceUuid; + } + + public java.sql.Timestamp createDate; + public void setCreateDate(java.sql.Timestamp createDate) { + this.createDate = createDate; + } + public java.sql.Timestamp getCreateDate() { + return this.createDate; + } + + public java.sql.Timestamp lastOpDate; + public void setLastOpDate(java.sql.Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + public java.sql.Timestamp getLastOpDate() { + return this.lastOpDate; + } + + public java.util.List hostRefs; + public void setHostRefs(java.util.List hostRefs) { + this.hostRefs = hostRefs; + } + public java.util.List getHostRefs() { + return this.hostRefs; + } + +} diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index 6bf72dbe39a..7a23a1a7b42 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -243,4 +243,11 @@ + + + + + + + diff --git a/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy b/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy index 07c05b73b9e..51b3f4811da 100644 --- a/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy +++ b/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy @@ -6929,33 +6929,6 @@ abstract class ApiHelper { } - def updateHostname(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.UpdateHostnameAction.class) Closure c) { - def a = new org.zstack.sdk.UpdateHostnameAction() - a.sessionId = Test.currentEnvSpec?.session?.uuid - c.resolveStrategy = Closure.OWNER_FIRST - c.delegate = a - c() - - - if (System.getProperty("apipath") != null) { - if (a.apiId == null) { - a.apiId = Platform.uuid - } - - def tracker = new ApiPathTracker(a.apiId) - def out = errorOut(a.call()) - def path = tracker.getApiPath() - if (!path.isEmpty()) { - Test.apiPaths[a.class.name] = path.join(" --->\n") - } - - return out - } else { - 
return errorOut(a.call()) - } - } - - def createIPsecConnection(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.CreateIPsecConnectionAction.class) Closure c) { def a = new org.zstack.sdk.CreateIPsecConnectionAction() a.sessionId = Test.currentEnvSpec?.session?.uuid @@ -32212,6 +32185,33 @@ abstract class ApiHelper { } + def updateHostname(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.UpdateHostnameAction.class) Closure c) { + def a = new org.zstack.sdk.UpdateHostnameAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + def updateIPsecConnection(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.UpdateIPsecConnectionAction.class) Closure c) { def a = new org.zstack.sdk.UpdateIPsecConnectionAction() a.sessionId = Test.currentEnvSpec?.session?.uuid @@ -38395,8 +38395,8 @@ abstract class ApiHelper { } - def addZBox(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.AddZBoxAction.class) Closure c) { - def a = new org.zstack.sdk.zbox.AddZBoxAction() + def cleanSoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.CleanSoftwarePackageAction.class) Closure c) { + def a = new org.zstack.sdk.softwarePackage.header.CleanSoftwarePackageAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a @@ -38422,8 +38422,8 @@ abstract class ApiHelper { } - def createZBoxBackup(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = 
org.zstack.sdk.zbox.CreateZBoxBackupAction.class) Closure c) { - def a = new org.zstack.sdk.zbox.CreateZBoxBackupAction() + def getDirectoryUsage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.GetDirectoryUsageAction.class) Closure c) { + def a = new org.zstack.sdk.softwarePackage.header.GetDirectoryUsageAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a @@ -38449,8 +38449,8 @@ abstract class ApiHelper { } - def ejectZBox(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.EjectZBoxAction.class) Closure c) { - def a = new org.zstack.sdk.zbox.EjectZBoxAction() + def getUploadSoftwarePackageJobDetails(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.GetUploadSoftwarePackageJobDetailsAction.class) Closure c) { + def a = new org.zstack.sdk.softwarePackage.header.GetUploadSoftwarePackageJobDetailsAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a @@ -38476,8 +38476,8 @@ abstract class ApiHelper { } - def getZBoxBackupDetails(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.GetZBoxBackupDetailsAction.class) Closure c) { - def a = new org.zstack.sdk.zbox.GetZBoxBackupDetailsAction() + def installSoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.InstallSoftwarePackageAction.class) Closure c) { + def a = new org.zstack.sdk.softwarePackage.header.InstallSoftwarePackageAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a @@ -38503,8 +38503,8 @@ abstract class ApiHelper { } - def queryZBox(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.QueryZBoxAction.class) Closure c) { - def a = new org.zstack.sdk.zbox.QueryZBoxAction() + def querySoftwarePackage(@DelegatesTo(strategy = 
Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.QuerySoftwarePackageAction.class) Closure c) { + def a = new org.zstack.sdk.softwarePackage.header.QuerySoftwarePackageAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a @@ -38532,15 +38532,13 @@ abstract class ApiHelper { } - def queryZBoxBackup(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.QueryZBoxBackupAction.class) Closure c) { - def a = new org.zstack.sdk.zbox.QueryZBoxBackupAction() + def uninstallSoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.UninstallSoftwarePackageAction.class) Closure c) { + def a = new org.zstack.sdk.softwarePackage.header.UninstallSoftwarePackageAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a c() - a.conditions = a.conditions.collect { it.toString() } - if (System.getProperty("apipath") != null) { if (a.apiId == null) { @@ -38561,8 +38559,8 @@ abstract class ApiHelper { } - def syncZBoxCapacity(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.SyncZBoxCapacityAction.class) Closure c) { - def a = new org.zstack.sdk.zbox.SyncZBoxCapacityAction() + def uploadSoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.UploadSoftwarePackageAction.class) Closure c) { + def a = new org.zstack.sdk.softwarePackage.header.UploadSoftwarePackageAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a @@ -38588,25 +38586,26 @@ abstract class ApiHelper { } - def cleanSoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.CleanSoftwarePackageAction.class) Closure c) { - def a = new org.zstack.sdk.softwarePackage.header.CleanSoftwarePackageAction() + def addTpm(@DelegatesTo(strategy = 
Closure.OWNER_FIRST, value = org.zstack.sdk.tpm.api.AddTpmAction.class) Closure c) { + def a = new org.zstack.sdk.tpm.api.AddTpmAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a c() + if (System.getProperty("apipath") != null) { if (a.apiId == null) { a.apiId = Platform.uuid } - + def tracker = new ApiPathTracker(a.apiId) def out = errorOut(a.call()) def path = tracker.getApiPath() if (!path.isEmpty()) { Test.apiPaths[a.class.name] = path.join(" --->\n") } - + return out } else { return errorOut(a.call()) @@ -38614,25 +38613,82 @@ abstract class ApiHelper { } - def getDirectoryUsage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.GetDirectoryUsageAction.class) Closure c) { - def a = new org.zstack.sdk.softwarePackage.header.GetDirectoryUsageAction() + def getTpmCapability(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.tpm.api.GetTpmCapabilityAction.class) Closure c) { + def a = new org.zstack.sdk.tpm.api.GetTpmCapabilityAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a c() + if (System.getProperty("apipath") != null) { if (a.apiId == null) { a.apiId = Platform.uuid } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + def queryTpm(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.tpm.api.QueryTpmAction.class) Closure c) { + def a = new org.zstack.sdk.tpm.api.QueryTpmAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + a.conditions = a.conditions.collect { it.toString() } + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + def 
tracker = new ApiPathTracker(a.apiId) def out = errorOut(a.call()) def path = tracker.getApiPath() if (!path.isEmpty()) { Test.apiPaths[a.class.name] = path.join(" --->\n") } + + return out + } else { + return errorOut(a.call()) + } + } + + def removeTpm(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.tpm.api.RemoveTpmAction.class) Closure c) { + def a = new org.zstack.sdk.tpm.api.RemoveTpmAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + return out } else { return errorOut(a.call()) @@ -38640,25 +38696,80 @@ abstract class ApiHelper { } - def getUploadSoftwarePackageJobDetails(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.GetUploadSoftwarePackageJobDetailsAction.class) Closure c) { - def a = new org.zstack.sdk.softwarePackage.header.GetUploadSoftwarePackageJobDetailsAction() + def updateTpm(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.tpm.api.UpdateTpmAction.class) Closure c) { + def a = new org.zstack.sdk.tpm.api.UpdateTpmAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a c() + if (System.getProperty("apipath") != null) { if (a.apiId == null) { a.apiId = Platform.uuid } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def addZBox(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.AddZBoxAction.class) Closure c) { + 
def a = new org.zstack.sdk.zbox.AddZBoxAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + def tracker = new ApiPathTracker(a.apiId) def out = errorOut(a.call()) def path = tracker.getApiPath() if (!path.isEmpty()) { Test.apiPaths[a.class.name] = path.join(" --->\n") } + + return out + } else { + return errorOut(a.call()) + } + } + + def createZBoxBackup(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.CreateZBoxBackupAction.class) Closure c) { + def a = new org.zstack.sdk.zbox.CreateZBoxBackupAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + return out } else { return errorOut(a.call()) @@ -38666,22 +38777,26 @@ abstract class ApiHelper { } - def installSoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.InstallSoftwarePackageAction.class) Closure c) { - def a = new org.zstack.sdk.softwarePackage.header.InstallSoftwarePackageAction() + def ejectZBox(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.EjectZBoxAction.class) Closure c) { + def a = new org.zstack.sdk.zbox.EjectZBoxAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a c() + + if (System.getProperty("apipath") != null) { if (a.apiId == null) { a.apiId = Platform.uuid } + def tracker = new ApiPathTracker(a.apiId) def out = errorOut(a.call()) def path = tracker.getApiPath() if (!path.isEmpty()) { 
Test.apiPaths[a.class.name] = path.join(" --->\n") } + return out } else { return errorOut(a.call()) @@ -38689,13 +38804,40 @@ abstract class ApiHelper { } - def querySoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.QuerySoftwarePackageAction.class) Closure c) { - def a = new org.zstack.sdk.softwarePackage.header.QuerySoftwarePackageAction() + def getZBoxBackupDetails(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.GetZBoxBackupDetailsAction.class) Closure c) { + def a = new org.zstack.sdk.zbox.GetZBoxBackupDetailsAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + def queryZBox(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.QueryZBoxAction.class) Closure c) { + def a = new org.zstack.sdk.zbox.QueryZBoxAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + a.conditions = a.conditions.collect { it.toString() } @@ -38703,14 +38845,14 @@ abstract class ApiHelper { if (a.apiId == null) { a.apiId = Platform.uuid } - + def tracker = new ApiPathTracker(a.apiId) def out = errorOut(a.call()) def path = tracker.getApiPath() if (!path.isEmpty()) { Test.apiPaths[a.class.name] = path.join(" --->\n") } - + return out } else { return errorOut(a.call()) @@ -38718,26 +38860,28 @@ abstract class ApiHelper { } - def uninstallSoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.UninstallSoftwarePackageAction.class) Closure c) { - 
def a = new org.zstack.sdk.softwarePackage.header.UninstallSoftwarePackageAction() + def queryZBoxBackup(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.QueryZBoxBackupAction.class) Closure c) { + def a = new org.zstack.sdk.zbox.QueryZBoxBackupAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a c() + + a.conditions = a.conditions.collect { it.toString() } if (System.getProperty("apipath") != null) { if (a.apiId == null) { a.apiId = Platform.uuid } - + def tracker = new ApiPathTracker(a.apiId) def out = errorOut(a.call()) def path = tracker.getApiPath() if (!path.isEmpty()) { Test.apiPaths[a.class.name] = path.join(" --->\n") } - + return out } else { return errorOut(a.call()) @@ -38745,25 +38889,26 @@ abstract class ApiHelper { } - def uploadSoftwarePackage(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.softwarePackage.header.UploadSoftwarePackageAction.class) Closure c) { - def a = new org.zstack.sdk.softwarePackage.header.UploadSoftwarePackageAction() + def syncZBoxCapacity(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.zbox.SyncZBoxCapacityAction.class) Closure c) { + def a = new org.zstack.sdk.zbox.SyncZBoxCapacityAction() a.sessionId = Test.currentEnvSpec?.session?.uuid c.resolveStrategy = Closure.OWNER_FIRST c.delegate = a c() + if (System.getProperty("apipath") != null) { if (a.apiId == null) { a.apiId = Platform.uuid } - + def tracker = new ApiPathTracker(a.apiId) def out = errorOut(a.call()) def path = tracker.getApiPath() if (!path.isEmpty()) { Test.apiPaths[a.class.name] = path.join(" --->\n") } - + return out } else { return errorOut(a.call()) diff --git a/utils/src/main/java/org/zstack/utils/StringDSL.java b/utils/src/main/java/org/zstack/utils/StringDSL.java index 54cbf4135b8..705b88a8b4d 100755 --- a/utils/src/main/java/org/zstack/utils/StringDSL.java +++ b/utils/src/main/java/org/zstack/utils/StringDSL.java @@ -150,8 
+150,12 @@ public static boolean isApiId(String apiId) { } public static String createFixedUuid(Class voClass) { + return createFixedUuid(voClass.getSimpleName()); + } + + public static String createFixedUuid(String text) { StringBuilder builder = new StringBuilder( - UUID.nameUUIDFromBytes(voClass.getSimpleName().getBytes()).toString().replace("-", "")); + UUID.nameUUIDFromBytes(text.getBytes()).toString().replace("-", "")); builder.setCharAt(0, 'f'); builder.setCharAt(1, 'f'); builder.setCharAt(2, '0'); From 6ffa50e5631796250580d999cbc64df70c85d446 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Wed, 4 Feb 2026 15:21:52 +0800 Subject: [PATCH 23/76] [compute]: support create VM with TPM Resolves: ZSV-11310 Change-Id: I6b746e6a666d7267626d647777677a77716a6261 --- .../vm/InstantiateVmFromNewCreatedStruct.java | 12 +++++++ .../org/zstack/compute/vm/VmInstanceBase.java | 1 + .../compute/vm/VmInstanceManagerImpl.java | 3 +- .../zstack/compute/vm/VmInstanceUtils.java | 1 + .../compute/vm/devices/VmTpmExtensions.java | 27 ++++++++++++++++ .../compute/vm/devices/VmTpmManager.java | 29 +++++++++++++++++ conf/springConfigXml/Kvm.xml | 6 ++++ conf/springConfigXml/VmInstanceManager.xml | 8 +++++ ...plianceVmInstanceCreateExtensionPoint.java | 5 +++ .../zstack/header/vm/CreateVmInstanceMsg.java | 10 ++++++ .../InstantiateNewCreatedVmInstanceMsg.java | 10 ++++++ .../vm/VmInstanceCreateExtensionPoint.java | 6 ++-- .../org/zstack/header/vm/VmInstanceSpec.java | 10 ++++++ .../appliancevm/CreateApplianceVmJob.java | 4 +-- .../java/org/zstack/kvm/KVMAgentCommands.java | 10 ++++++ .../org/zstack/kvm/tpm/KvmTpmExtensions.java | 32 +++++++++++++++++++ .../org/zstack/kvm/tpm/KvmTpmManager.java | 16 ++++------ .../main/java/org/zstack/kvm/tpm/TpmTO.java | 15 +++++++++ .../test/resources/springConfigXml/Kvm.xml | 6 ++++ 19 files changed, 196 insertions(+), 15 deletions(-) create mode 100644 compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java create mode 100644 
compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java create mode 100644 header/src/main/java/org/zstack/header/vm/ApplianceVmInstanceCreateExtensionPoint.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmExtensions.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmTO.java diff --git a/compute/src/main/java/org/zstack/compute/vm/InstantiateVmFromNewCreatedStruct.java b/compute/src/main/java/org/zstack/compute/vm/InstantiateVmFromNewCreatedStruct.java index 485d78a1b79..55d5eb7a1c1 100644 --- a/compute/src/main/java/org/zstack/compute/vm/InstantiateVmFromNewCreatedStruct.java +++ b/compute/src/main/java/org/zstack/compute/vm/InstantiateVmFromNewCreatedStruct.java @@ -3,6 +3,7 @@ import org.zstack.header.configuration.VmCustomSpecificationStruct; import org.zstack.header.host.CpuArchitecture; import org.zstack.header.vm.*; +import org.zstack.header.vm.devices.VmDevicesSpec; import java.util.ArrayList; import java.util.List; @@ -32,6 +33,7 @@ public class InstantiateVmFromNewCreatedStruct { private List dataDisks; private List deprecatedDataVolumeSpecs = new ArrayList<>(); private VmCustomSpecificationStruct vmCustomSpecification; + private VmDevicesSpec devicesSpec; public List getCandidatePrimaryStorageUuidsForRootVolume() { return candidatePrimaryStorageUuidsForRootVolume; @@ -87,6 +89,14 @@ public void setVmCustomSpecification(VmCustomSpecificationStruct vmCustomSpecifi this.vmCustomSpecification = vmCustomSpecification; } + public VmDevicesSpec getDevicesSpec() { + return devicesSpec; + } + + public void setDevicesSpec(VmDevicesSpec devicesSpec) { + this.devicesSpec = devicesSpec; + } + public List getRootVolumeSystemTags() { return rootVolumeSystemTags; } @@ -167,6 +177,7 @@ public static InstantiateVmFromNewCreatedStruct fromMessage(InstantiateNewCreate struct.setDataDisks(msg.getDataDisks()); struct.setDeprecatedDataVolumeSpecs(msg.getDeprecatedDataVolumeSpecs()); 
struct.setVmCustomSpecification(msg.getVmCustomSpecification()); + struct.setDevicesSpec(msg.getDevicesSpec()); return struct; } @@ -187,6 +198,7 @@ public static InstantiateVmFromNewCreatedStruct fromMessage(CreateVmInstanceMsg struct.setDataDisks(msg.getDataDisks()); struct.setDeprecatedDataVolumeSpecs(msg.getDeprecatedDataVolumeSpecs()); struct.setVmCustomSpecification(msg.getVmCustomSpecification()); + struct.setDevicesSpec(msg.getDevicesSpec()); return struct; } diff --git a/compute/src/main/java/org/zstack/compute/vm/VmInstanceBase.java b/compute/src/main/java/org/zstack/compute/vm/VmInstanceBase.java index e31bc001218..9ad123a6ef1 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmInstanceBase.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmInstanceBase.java @@ -7705,6 +7705,7 @@ private VmInstanceSpec buildVmInstanceSpecFromStruct(InstantiateVmFromNewCreated spec.setDataDisks(struct.getDataDisks()); spec.setDeprecatedDisksSpecs(struct.getDeprecatedDataVolumeSpecs()); spec.setVmCustomSpecification(struct.getVmCustomSpecification()); + spec.setDevicesSpec(struct.getDevicesSpec()); List cdRomSpecs = buildVmCdRomSpecsForNewCreated(spec); spec.setCdRomSpecs(cdRomSpecs); diff --git a/compute/src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java b/compute/src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java index 0838c24d9c3..d3a6d6396b2 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java @@ -1217,7 +1217,7 @@ public void setup() { @Override public void run(FlowTrigger trigger, Map data) { pluginRgty.getExtensionList(VmInstanceCreateExtensionPoint.class).forEach( - extensionPoint -> extensionPoint.afterPersistVmInstanceVO(finalVo)); + extensionPoint -> extensionPoint.afterPersistVmInstanceVO(finalVo, msg)); trigger.next(); } @@ -1352,6 +1352,7 @@ public void run(FlowTrigger trigger, Map data) { 
smsg.setDataDisks(msg.getDataDisks()); smsg.setDeprecatedDataVolumeSpecs(msg.getDeprecatedDataVolumeSpecs()); smsg.setVmCustomSpecification(msg.getVmCustomSpecification()); + smsg.setDevicesSpec(msg.getDevicesSpec()); bus.makeTargetServiceIdByResourceUuid(smsg, VmInstanceConstant.SERVICE_ID, finalVo.getUuid()); bus.send(smsg, new CloudBusCallBack(smsg) { @Override diff --git a/compute/src/main/java/org/zstack/compute/vm/VmInstanceUtils.java b/compute/src/main/java/org/zstack/compute/vm/VmInstanceUtils.java index 33afa043278..bebcae3520b 100644 --- a/compute/src/main/java/org/zstack/compute/vm/VmInstanceUtils.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmInstanceUtils.java @@ -44,6 +44,7 @@ public static CreateVmInstanceMsg fromAPICreateVmInstanceMsg(APICreateVmInstance cmsg.setGuestOsType(msg.getGuestOsType()); cmsg.setArchitecture(msg.getArchitecture()); cmsg.setStrategy(msg.getStrategy()); + cmsg.setDevicesSpec(msg.getDevicesSpec()); if (CollectionUtils.isNotEmpty(msg.getDataDiskOfferingUuids()) || CollectionUtils.isNotEmpty(msg.getDataDiskSizes())) { cmsg.setPrimaryStorageUuidForDataVolume(getPSUuidForDataVolume(msg.getSystemTags())); } diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java new file mode 100644 index 00000000000..4b25befa82f --- /dev/null +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java @@ -0,0 +1,27 @@ +package org.zstack.compute.vm.devices; + +import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.header.vm.CreateVmInstanceMsg; +import org.zstack.header.vm.VmInstanceCreateExtensionPoint; +import org.zstack.header.vm.VmInstanceVO; +import org.zstack.header.vm.devices.VmDevicesSpec; + +public class VmTpmExtensions implements VmInstanceCreateExtensionPoint { + @Autowired + private VmTpmManager vmTpmManager; + + @Override + public void preCreateVmInstance(CreateVmInstanceMsg 
msg) { + // do-nothing + } + + @Override + public void afterPersistVmInstanceVO(VmInstanceVO vo, CreateVmInstanceMsg msg) { + final VmDevicesSpec spec = msg.getDevicesSpec(); + if (spec == null || spec.getTpm() == null || !spec.getTpm().isEnable()) { + return; + } + + vmTpmManager.persistTpmVO(null, vo.getUuid()); + } +} diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java new file mode 100644 index 00000000000..a37e57d8426 --- /dev/null +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java @@ -0,0 +1,29 @@ +package org.zstack.compute.vm.devices; + +import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.core.Platform; +import org.zstack.core.db.DatabaseFacade; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.utils.Utils; +import org.zstack.utils.logging.CLogger; + +public class VmTpmManager { + private static final CLogger logger = Utils.getLogger(VmTpmManager.class); + + @Autowired + private DatabaseFacade databaseFacade; + + public TpmVO persistTpmVO(String tpmUuid, String vmUuid) { + if (tpmUuid == null) { + tpmUuid = Platform.getUuid(); + } + TpmVO tpm = new TpmVO(); + tpm.setUuid(tpmUuid); + tpm.setResourceName("TPM-for-VM-" + vmUuid); + tpm.setVmInstanceUuid(vmUuid); + databaseFacade.persistAndRefresh(tpm); + + logger.debug("Persisted TpmVO for VM " + vmUuid + " with uuid=" + tpm.getUuid()); + return tpm; + } +} diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index 7a7160db29a..4fb474f97a9 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -251,4 +251,10 @@ + + + + + + diff --git a/conf/springConfigXml/VmInstanceManager.xml b/conf/springConfigXml/VmInstanceManager.xml index 8d3f7cc41e6..6786f86c780 100755 --- a/conf/springConfigXml/VmInstanceManager.xml +++ b/conf/springConfigXml/VmInstanceManager.xml @@ -279,4 +279,12 @@ + + + + + + + + diff 
--git a/header/src/main/java/org/zstack/header/vm/ApplianceVmInstanceCreateExtensionPoint.java b/header/src/main/java/org/zstack/header/vm/ApplianceVmInstanceCreateExtensionPoint.java new file mode 100644 index 00000000000..ef723f29a1d --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/ApplianceVmInstanceCreateExtensionPoint.java @@ -0,0 +1,5 @@ +package org.zstack.header.vm; + +public interface ApplianceVmInstanceCreateExtensionPoint { + default void afterPersistApplianceVmInstanceVO(VmInstanceVO vo) {} +} diff --git a/header/src/main/java/org/zstack/header/vm/CreateVmInstanceMsg.java b/header/src/main/java/org/zstack/header/vm/CreateVmInstanceMsg.java index 037d9f8b323..5b6f04dac3a 100755 --- a/header/src/main/java/org/zstack/header/vm/CreateVmInstanceMsg.java +++ b/header/src/main/java/org/zstack/header/vm/CreateVmInstanceMsg.java @@ -2,6 +2,7 @@ import org.zstack.header.configuration.VmCustomSpecificationStruct; import org.zstack.header.message.NeedReplyMessage; +import org.zstack.header.vm.devices.VmDevicesSpec; import java.util.ArrayList; import java.util.List; @@ -45,6 +46,7 @@ public class CreateVmInstanceMsg extends NeedReplyMessage implements CreateVmIns private List dataDisks; private List deprecatedDataVolumeSpecs = new ArrayList<>(); private VmCustomSpecificationStruct vmCustomSpecification; + private VmDevicesSpec devicesSpec; public List getCandidatePrimaryStorageUuidsForRootVolume() { return candidatePrimaryStorageUuidsForRootVolume; @@ -363,4 +365,12 @@ public VmCustomSpecificationStruct getVmCustomSpecification() { public void setVmCustomSpecification(VmCustomSpecificationStruct vmCustomSpecification) { this.vmCustomSpecification = vmCustomSpecification; } + + public VmDevicesSpec getDevicesSpec() { + return devicesSpec; + } + + public void setDevicesSpec(VmDevicesSpec devicesSpec) { + this.devicesSpec = devicesSpec; + } } diff --git a/header/src/main/java/org/zstack/header/vm/InstantiateNewCreatedVmInstanceMsg.java 
b/header/src/main/java/org/zstack/header/vm/InstantiateNewCreatedVmInstanceMsg.java index 55a57474009..8c01f099ea2 100755 --- a/header/src/main/java/org/zstack/header/vm/InstantiateNewCreatedVmInstanceMsg.java +++ b/header/src/main/java/org/zstack/header/vm/InstantiateNewCreatedVmInstanceMsg.java @@ -3,6 +3,7 @@ import org.zstack.header.configuration.VmCustomSpecificationStruct; import org.zstack.header.host.CpuArchitecture; import org.zstack.header.message.NeedReplyMessage; +import org.zstack.header.vm.devices.VmDevicesSpec; import java.util.ArrayList; import java.util.List; @@ -59,6 +60,7 @@ public void setCandidatePrimaryStorageUuidsForDataVolume(List candidateP private DiskAO rootDisk; private List dataDisks; private List deprecatedDataVolumeSpecs; + private VmDevicesSpec devicesSpec; public DiskAO getRootDisk() { return rootDisk; @@ -84,6 +86,14 @@ public void setDeprecatedDataVolumeSpecs(List deprecatedDataVolumeSpecs) this.deprecatedDataVolumeSpecs = deprecatedDataVolumeSpecs; } + public VmDevicesSpec getDevicesSpec() { + return devicesSpec; + } + + public void setDevicesSpec(VmDevicesSpec devicesSpec) { + this.devicesSpec = devicesSpec; + } + public List getSoftAvoidHostUuids() { return softAvoidHostUuids; } diff --git a/header/src/main/java/org/zstack/header/vm/VmInstanceCreateExtensionPoint.java b/header/src/main/java/org/zstack/header/vm/VmInstanceCreateExtensionPoint.java index e3bbe33bf11..029eaa43334 100644 --- a/header/src/main/java/org/zstack/header/vm/VmInstanceCreateExtensionPoint.java +++ b/header/src/main/java/org/zstack/header/vm/VmInstanceCreateExtensionPoint.java @@ -1,10 +1,12 @@ package org.zstack.header.vm; /** - * Created by lining on 2019/4/17. + * Only for UserVM. 
+ * + * Appliance VM use {@link ApplianceVmInstanceCreateExtensionPoint} */ public interface VmInstanceCreateExtensionPoint { void preCreateVmInstance(CreateVmInstanceMsg msg); - default void afterPersistVmInstanceVO(VmInstanceVO vo) {} + default void afterPersistVmInstanceVO(VmInstanceVO vo, CreateVmInstanceMsg msg) {} } diff --git a/header/src/main/java/org/zstack/header/vm/VmInstanceSpec.java b/header/src/main/java/org/zstack/header/vm/VmInstanceSpec.java index 4732fb36d40..18620a65fad 100755 --- a/header/src/main/java/org/zstack/header/vm/VmInstanceSpec.java +++ b/header/src/main/java/org/zstack/header/vm/VmInstanceSpec.java @@ -17,6 +17,7 @@ import org.zstack.header.network.l3.L3NetworkInventory; import org.zstack.header.storage.primary.PrimaryStorageInventory; import org.zstack.header.vm.VmInstanceConstant.VmOperation; +import org.zstack.header.vm.devices.VmDevicesSpec; import org.zstack.header.volume.VolumeFormat; import org.zstack.header.volume.VolumeInventory; import org.zstack.header.volume.VolumeType; @@ -403,6 +404,7 @@ public void setCandidatePrimaryStorageUuidsForDataVolume(List candidateP private List dataDisks; private List deprecatedDisksSpecs = new ArrayList<>(); private VmCustomSpecificationStruct vmCustomSpecification; + private VmDevicesSpec devicesSpec; public DiskAO getRootDisk() { return rootDisk; @@ -436,6 +438,14 @@ public void setVmCustomSpecification(VmCustomSpecificationStruct vmCustomSpecifi this.vmCustomSpecification = vmCustomSpecification; } + public VmDevicesSpec getDevicesSpec() { + return devicesSpec; + } + + public void setDevicesSpec(VmDevicesSpec devicesSpec) { + this.devicesSpec = devicesSpec; + } + public boolean isSkipIpAllocation() { return skipIpAllocation; } diff --git a/plugin/applianceVm/src/main/java/org/zstack/appliancevm/CreateApplianceVmJob.java b/plugin/applianceVm/src/main/java/org/zstack/appliancevm/CreateApplianceVmJob.java index 98419fcc8a2..48e3f429ab4 100755 --- 
a/plugin/applianceVm/src/main/java/org/zstack/appliancevm/CreateApplianceVmJob.java +++ b/plugin/applianceVm/src/main/java/org/zstack/appliancevm/CreateApplianceVmJob.java @@ -177,8 +177,8 @@ protected ApplianceVmVO scripts() { } final ApplianceVmVO finalVO = avo; - pluginRgty.getExtensionList(VmInstanceCreateExtensionPoint.class).forEach( - extension -> extension.afterPersistVmInstanceVO(finalVO)); + pluginRgty.getExtensionList(ApplianceVmInstanceCreateExtensionPoint.class).forEach( + extension -> extension.afterPersistApplianceVmInstanceVO(finalVO)); trigger.next(); } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index f69eff87317..2dd6e223fc3 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -13,6 +13,7 @@ import org.zstack.header.vm.*; import org.zstack.header.vm.devices.DeviceAddress; import org.zstack.header.vm.devices.VirtualDeviceInfo; +import org.zstack.kvm.tpm.TpmTO; import org.zstack.network.securitygroup.RuleTO; import org.zstack.network.securitygroup.SecurityGroupMembersTO; import org.zstack.network.securitygroup.VmNicSecurityTO; @@ -2068,6 +2069,7 @@ public static class StartVmCmd extends vdiCmd implements VmAddOnsCmd { private List dataVolumes; private List cacheVolumes; private List nics; + private TpmTO tpm; private long timeout; private Map addons; private boolean instanceOfferingOnlineChange; @@ -2545,6 +2547,14 @@ public void setNics(List nics) { this.nics = nics; } + public TpmTO getTpm() { + return tpm; + } + + public void setTpm(TpmTO tpm) { + this.tpm = tpm; + } + public long getTimeout() { return timeout; } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmExtensions.java new file mode 100644 index 00000000000..7ff72c6affa --- /dev/null +++ 
b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmExtensions.java @@ -0,0 +1,32 @@ +package org.zstack.kvm.tpm; + +import org.zstack.header.errorcode.ErrorCode; +import org.zstack.header.vm.VmInstanceSpec; +import org.zstack.header.vm.devices.VmDevicesSpec; +import org.zstack.kvm.KVMAgentCommands; +import org.zstack.kvm.KVMHostInventory; +import org.zstack.kvm.KVMStartVmExtensionPoint; + +public class KvmTpmExtensions implements KVMStartVmExtensionPoint { + @Override + public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAgentCommands.StartVmCmd cmd) { + final VmDevicesSpec devicesSpec = spec.getDevicesSpec(); + if (devicesSpec == null || devicesSpec.getTpm() == null || !devicesSpec.getTpm().isEnable()) { + return; + } + + TpmTO tpm = new TpmTO(); + tpm.setKeyProviderUuid(devicesSpec.getTpm().getKeyProviderUuid()); + cmd.setTpm(tpm); + } + + @Override + public void startVmOnKvmSuccess(KVMHostInventory host, VmInstanceSpec spec) { + // do-nothing + } + + @Override + public void startVmOnKvmFailed(KVMHostInventory host, VmInstanceSpec spec, ErrorCode err) { + // do-nothing + } +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java index 2d643de746b..44b31131c87 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java @@ -1,10 +1,10 @@ package org.zstack.kvm.tpm; import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.compute.vm.devices.VmTpmManager; import org.zstack.core.cloudbus.CloudBus; import org.zstack.core.cloudbus.CloudBusCallBack; import org.zstack.core.cloudbus.MessageSafe; -import org.zstack.core.db.DatabaseFacade; import org.zstack.core.db.Q; import org.zstack.core.db.SQL; import org.zstack.core.thread.ChainTask; @@ -63,9 +63,9 @@ public class KvmTpmManager extends AbstractService { @Autowired private ThreadFacade threadFacade; 
@Autowired - private DatabaseFacade databaseFacade; - @Autowired private ResourceConfigFacade resourceConfigFacade; + @Autowired + private VmTpmManager vmTpmManager; @Override public boolean start() { @@ -197,11 +197,7 @@ public void run(FlowTrigger trigger, Map data) { @Override public void run(FlowTrigger trigger, Map data) { - TpmVO tpm = new TpmVO(); - tpm.setUuid(context.tpmUuid); - tpm.setResourceName("TPM-for-VM-" + context.vmInstanceUuid); - tpm.setVmInstanceUuid(context.vmInstanceUuid); - databaseFacade.persist(tpm); + vmTpmManager.persistTpmVO(context.tpmUuid, context.vmInstanceUuid); trigger.next(); } }).done(new FlowDoneHandler(completion) { @@ -312,10 +308,10 @@ private void handle(APIGetTpmCapabilityMsg msg) { final TpmVO tpm = Q.New(TpmVO.class) .eq(TpmVO_.uuid, msg.getTpmUuid()) - .findValue(); + .find(); final VmInstanceVO vm = Q.New(VmInstanceVO.class) .eq(VmInstanceVO_.uuid, tpm.getVmInstanceUuid()) - .findValue(); + .find(); view.setTpmInventory(TpmInventory.valueOf(tpm)); view.setEdkVersion(VM_EDK.getTokenByResourceUuid(vm.getUuid(), EDK_RPM_TOKEN)); diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmTO.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmTO.java new file mode 100644 index 00000000000..d3210a3c2d7 --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmTO.java @@ -0,0 +1,15 @@ +package org.zstack.kvm.tpm; + +import java.io.Serializable; + +public class TpmTO implements Serializable { + private String keyProviderUuid; + + public String getKeyProviderUuid() { + return keyProviderUuid; + } + + public void setKeyProviderUuid(String keyProviderUuid) { + this.keyProviderUuid = keyProviderUuid; + } +} diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index 7a23a1a7b42..9551587dd5d 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -250,4 +250,10 @@ + + + + + + From 
88ec387898bd83c5d9904a8c034b43ffa93776e6 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Wed, 4 Feb 2026 18:30:53 +0800 Subject: [PATCH 24/76] [compute]: add secure boot dependency Resolves: ZSV-11310 Resolves: ZSPHER-1 Change-Id: I6d787a7375636c77656176616478717872676478 --- .../org/zstack/compute/vm/VmGlobalConfig.java | 1 + conf/springConfigXml/Kvm.xml | 6 +++ .../java/org/zstack/kvm/KVMAgentCommands.java | 10 ++++ .../src/main/java/org/zstack/kvm/KVMHost.java | 4 -- .../kvm/efi/KvmSecureBootExtensions.java | 53 +++++++++++++++++++ .../test/resources/springConfigXml/Kvm.xml | 6 +++ 6 files changed, 76 insertions(+), 4 deletions(-) create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java diff --git a/compute/src/main/java/org/zstack/compute/vm/VmGlobalConfig.java b/compute/src/main/java/org/zstack/compute/vm/VmGlobalConfig.java index 37621cca6a7..d588936ee53 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmGlobalConfig.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmGlobalConfig.java @@ -113,6 +113,7 @@ public class VmGlobalConfig { @GlobalConfigDef(defaultValue = "false", type = Boolean.class, description = "sync clock after vm resume") public static GlobalConfig VM_CLOCK_SYNC_AFTER_VM_RESUME = new GlobalConfig(CATEGORY, "vm.clock.sync.after.vm.resume"); + @BindResourceConfig(value = {VmInstanceVO.class, ClusterVO.class}) @GlobalConfigValidation(validValues = {"true", "false"}) public static GlobalConfig ENABLE_UEFI_SECURE_BOOT = new GlobalConfig(CATEGORY, "enable.uefi.secure.boot"); diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index 4fb474f97a9..9536f559eb3 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -257,4 +257,10 @@ + + + + + + diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index 2dd6e223fc3..6ec7e3429b7 100755 --- 
a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -2104,6 +2104,8 @@ public static class StartVmCmd extends vdiCmd implements VmAddOnsCmd { private String bootMode; // used when bootMode == 'UEFI' private boolean secureBoot; + private String edkVersion; + private boolean fromForeignHypervisor; private String machineType; private Integer pciePortNums; @@ -2260,6 +2262,14 @@ public void setSecureBoot(boolean secureBoot) { this.secureBoot = secureBoot; } + public String getEdkVersion() { + return edkVersion; + } + + public void setEdkVersion(String edkVersion) { + this.edkVersion = edkVersion; + } + public boolean isEmulateHyperV() { return emulateHyperV; } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 23d7b1cfe47..9d22cf50976 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -4519,10 +4519,6 @@ protected void startVm(final VmInstanceSpec spec, final NeedReplyMessage msg, fi String bootMode = VmSystemTags.BOOT_MODE.getTokenByResourceUuid(spec.getVmInventory().getUuid(), VmSystemTags.BOOT_MODE_TOKEN); cmd.setBootMode(bootMode == null ? 
ImageBootMode.Legacy.toString() : bootMode); - if (cmd.getBootMode().equals(ImageBootMode.UEFI.toString()) - || cmd.getBootMode().equals(ImageBootMode.UEFI_WITH_CSM.toString())) { - cmd.setSecureBoot(VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT.value(Boolean.class)); - } deviceBootOrderOperator.updateVmDeviceBootOrder(cmd, spec); cmd.setBootDev(toKvmBootDev(spec.getBootOrders())); diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java new file mode 100644 index 00000000000..1ff53a531bc --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java @@ -0,0 +1,53 @@ +package org.zstack.kvm.efi; + +import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.compute.vm.VmGlobalConfig; +import org.zstack.header.errorcode.ErrorCode; +import org.zstack.header.image.ImageBootMode; +import org.zstack.header.vm.VmInstanceSpec; +import org.zstack.kvm.KVMAgentCommands; +import org.zstack.kvm.KVMGlobalConfig; +import org.zstack.kvm.KVMHostInventory; +import org.zstack.kvm.KVMStartVmExtensionPoint; +import org.zstack.resourceconfig.ResourceConfig; +import org.zstack.resourceconfig.ResourceConfigFacade; +import org.zstack.utils.Utils; +import org.zstack.utils.logging.CLogger; + +public class KvmSecureBootExtensions implements KVMStartVmExtensionPoint { + private static final CLogger logger = Utils.getLogger(KvmSecureBootExtensions.class); + + @Autowired + private ResourceConfigFacade resourceConfigFacade; + + @Override + public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAgentCommands.StartVmCmd cmd) { + if (!isUefiBootMode(cmd.getBootMode())) { + return; + } + + ResourceConfig resourceConfig; + resourceConfig = resourceConfigFacade.getResourceConfig(VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT.getIdentity()); + cmd.setSecureBoot(resourceConfig.getResourceConfigValue(spec.getVmInventory().getUuid(), 
Boolean.class)); + + resourceConfig = resourceConfigFacade.getResourceConfig(KVMGlobalConfig.VM_EDK_VERSION_CONFIG.getIdentity()); + final String edkVersion = resourceConfig.getResourceConfigValue(spec.getVmInventory().getUuid(), String.class); + if (edkVersion != null && !edkVersion.isEmpty()) { + cmd.setEdkVersion(edkVersion); + } + } + + @Override + public void startVmOnKvmSuccess(KVMHostInventory host, VmInstanceSpec spec) { + // do-nothing + } + + @Override + public void startVmOnKvmFailed(KVMHostInventory host, VmInstanceSpec spec, ErrorCode err) { + // do-nothing + } + + private boolean isUefiBootMode(String bootMode) { + return bootMode.equals(ImageBootMode.UEFI.toString()) || bootMode.equals(ImageBootMode.UEFI_WITH_CSM.toString()); + } +} diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index 9551587dd5d..462aa352dc8 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -256,4 +256,10 @@ + + + + + + From 3c7a4a92c4e2a2f4e2dba4a1eaae255e7e12d7a5 Mon Sep 17 00:00:00 2001 From: "tao.yang" Date: Fri, 6 Feb 2026 19:55:28 +0800 Subject: [PATCH 25/76] [kms]: support kms DBImpact Resolves: ZSV-11331 Change-Id: I786f686371626e6674636772676c68747768716a --- conf/db/zsv/V5.0.0__schema.sql | 83 ++++ conf/persistence.xml | 6 + .../zstack/header/host/HostKeyIdentityVO.java | 78 ++++ .../header/host/HostKeyIdentityVO_.java | 14 + sdk/src/main/java/SourceClassMap.java | 10 + .../org/zstack/sdk/KeyProviderInventory.java | 63 ++++ .../org/zstack/sdk/KmsIdentityInventory.java | 71 ++++ .../java/org/zstack/sdk/KmsInventory.java | 71 ++++ .../java/org/zstack/sdk/NkpInventory.java | 39 ++ .../java/org/zstack/sdk/NkpRestoreInfo.java | 63 ++++ .../api/QueryKeyProviderAction.java | 75 ++++ .../api/QueryKeyProviderResult.java | 22 ++ .../api/RekeyKeyProviderRefsAction.java | 104 +++++ .../api/RekeyKeyProviderRefsResult.java | 7 + 
.../keyprovider/kms/api/CreateKmsAction.java | 128 +++++++ .../keyprovider/kms/api/CreateKmsResult.java | 14 + .../keyprovider/kms/api/DeleteKmsAction.java | 104 +++++ .../keyprovider/kms/api/DeleteKmsResult.java | 7 + .../keyprovider/kms/api/QueryKmsAction.java | 75 ++++ .../keyprovider/kms/api/QueryKmsResult.java | 22 ++ .../keyprovider/kms/api/UpdateKmsAction.java | 119 ++++++ .../keyprovider/kms/api/UpdateKmsResult.java | 14 + .../keyprovider/nkp/api/BackupNkpAction.java | 104 +++++ .../keyprovider/nkp/api/BackupNkpResult.java | 14 + .../keyprovider/nkp/api/CreateNkpAction.java | 119 ++++++ .../keyprovider/nkp/api/CreateNkpResult.java | 14 + .../keyprovider/nkp/api/DeleteNkpAction.java | 104 +++++ .../keyprovider/nkp/api/DeleteNkpResult.java | 7 + .../nkp/api/ParseNkpRestoreAction.java | 104 +++++ .../nkp/api/ParseNkpRestoreResult.java | 14 + .../keyprovider/nkp/api/QueryNkpAction.java | 75 ++++ .../keyprovider/nkp/api/QueryNkpResult.java | 22 ++ .../keyprovider/nkp/api/RestoreNkpAction.java | 104 +++++ .../keyprovider/nkp/api/RestoreNkpResult.java | 14 + .../keyprovider/nkp/api/UpdateNkpAction.java | 104 +++++ .../keyprovider/nkp/api/UpdateNkpResult.java | 14 + .../java/org/zstack/testlib/ApiHelper.groovy | 357 ++++++++++++++++++ 37 files changed, 2359 insertions(+) create mode 100644 header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java create mode 100644 header/src/main/java/org/zstack/header/host/HostKeyIdentityVO_.java create mode 100644 sdk/src/main/java/org/zstack/sdk/KeyProviderInventory.java create mode 100644 sdk/src/main/java/org/zstack/sdk/KmsIdentityInventory.java create mode 100644 sdk/src/main/java/org/zstack/sdk/KmsInventory.java create mode 100644 sdk/src/main/java/org/zstack/sdk/NkpInventory.java create mode 100644 sdk/src/main/java/org/zstack/sdk/NkpRestoreInfo.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/api/QueryKeyProviderAction.java create mode 100644 
sdk/src/main/java/org/zstack/sdk/keyprovider/api/QueryKeyProviderResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/DeleteKmsAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/DeleteKmsResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/QueryKmsAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/QueryKmsResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UpdateKmsAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UpdateKmsResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/BackupNkpAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/BackupNkpResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/DeleteNkpAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/DeleteNkpResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/QueryNkpAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/QueryNkpResult.java create mode 100644 
sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/RestoreNkpAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/RestoreNkpResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/UpdateNkpAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/UpdateNkpResult.java diff --git a/conf/db/zsv/V5.0.0__schema.sql b/conf/db/zsv/V5.0.0__schema.sql index 3ee7e729906..590c58ee528 100644 --- a/conf/db/zsv/V5.0.0__schema.sql +++ b/conf/db/zsv/V5.0.0__schema.sql @@ -20,3 +20,86 @@ CREATE TABLE IF NOT EXISTS `zstack`.`TpmHostRefVO` ( CONSTRAINT `fkTpmHostRefVOTpmVO` FOREIGN KEY (`tpmUuid`) REFERENCES `TpmVO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE, CONSTRAINT `fkTpmHostRefVOHostVO` FOREIGN KEY (`hostUuid`) REFERENCES `HostEO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- Feature: KMS | ZSPHER-46, ZSPHER-60, ZSPHER-61, ZSPHER-62 + +CREATE TABLE IF NOT EXISTS `zstack`.`KeyProviderVO` ( + `uuid` varchar(32) NOT NULL UNIQUE, + `name` varchar(255) NOT NULL, + `description` varchar(2048) DEFAULT NULL, + `type` varchar(32) NOT NULL, + `connected` boolean NOT NULL DEFAULT FALSE, + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`uuid`), + UNIQUE KEY `ukKeyProviderVOName` (`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `zstack`.`KmsVO` ( + `uuid` varchar(32) NOT NULL UNIQUE, + `endpoint` varchar(255) NOT NULL, + `port` int unsigned NOT NULL, + `kmipVersion` varchar(32) DEFAULT NULL, + `username` varchar(255) DEFAULT NULL, + `password` varchar(255) DEFAULT NULL, + `trusted` boolean NOT NULL DEFAULT FALSE, + `activeIdentityUuid` varchar(32) DEFAULT NULL, + `serverCertExpiredDate` timestamp NULL DEFAULT NULL, + `serverCertPem` text DEFAULT NULL, + PRIMARY KEY (`uuid`), + INDEX `idxKmsVOActiveIdentityUuid` 
(`activeIdentityUuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `zstack`.`KmsIdentityVO` ( + `uuid` varchar(32) NOT NULL UNIQUE, + `kmsUuid` varchar(32) NOT NULL, + `identityType` varchar(32) NOT NULL, + `clientCertPem` text DEFAULT NULL, + `clientKeyPem` text DEFAULT NULL, + `csrPem` text DEFAULT NULL, + `certExpiredDate` timestamp NULL DEFAULT NULL, + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`uuid`), + UNIQUE KEY `ukKmsIdentityVOKmsUuidType` (`kmsUuid`, `identityType`), + INDEX `idxKmsIdentityVOKmsUuid` (`kmsUuid`), + CONSTRAINT `fkKmsIdentityVOKmsVO` FOREIGN KEY (`kmsUuid`) REFERENCES `KmsVO` (`uuid`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `zstack`.`NkpVO` ( + `uuid` varchar(32) NOT NULL UNIQUE, + `kdf` varchar(64) NOT NULL, + `saltPolicy` varchar(64) NOT NULL, + `backedUp` boolean NOT NULL DEFAULT FALSE, + `currentVersion` int unsigned DEFAULT NULL, + PRIMARY KEY (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `zstack`.`HostKeyIdentityVO` ( + `hostUuid` varchar(32) NOT NULL UNIQUE, + `publicKey` text NOT NULL, + `fingerprint` varchar(128) NOT NULL, + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`hostUuid`), + CONSTRAINT `fkHostKeyIdentityVOHostEO` FOREIGN KEY (`hostUuid`) REFERENCES `HostEO` (`uuid`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `zstack`.`EncryptedResourceKeyRefVO` ( + `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT, + `resourceType` varchar(255) NOT NULL, + `resourceUuid` varchar(32) NOT NULL, + `providerUuid` varchar(32) DEFAULT NULL, + `providerName` varchar(255) NOT NULL, + `keyVersion` int unsigned DEFAULT NULL, + `kekRef` varchar(255) DEFAULT 
NULL, + `wrappedDek` text NOT NULL, + `algorithm` varchar(64) DEFAULT NULL, + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`id`), + INDEX `idxEncryptedResourceKeyRefVOResource` (`resourceUuid`, `resourceType`), + INDEX `idxEncryptedResourceKeyRefVOProviderUuid` (`providerUuid`), + INDEX `idxEncryptedResourceKeyRefVOProviderName` (`providerName`), + CONSTRAINT `fkEncryptedResourceKeyRefVOProviderUuid` FOREIGN KEY (`providerUuid`) REFERENCES `KeyProviderVO` (`uuid`) ON DELETE SET NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/conf/persistence.xml b/conf/persistence.xml index aa295bcb365..ff82022f554 100755 --- a/conf/persistence.xml +++ b/conf/persistence.xml @@ -24,6 +24,7 @@ org.zstack.header.cluster.ClusterEO org.zstack.header.host.HostVO org.zstack.header.host.HostEO + org.zstack.header.host.HostKeyIdentityVO org.zstack.header.host.CpuFeaturesHistoryVO org.zstack.header.storage.primary.PrimaryStorageVO org.zstack.header.storage.primary.PrimaryStorageEO @@ -182,6 +183,11 @@ org.zstack.header.vm.VmInstanceNumaNodeVO org.zstack.header.host.HostNumaNodeVO org.zstack.header.core.encrypt.EncryptionIntegrityVO + org.zstack.header.keyprovider.KeyProviderVO + org.zstack.header.keyprovider.KmsVO + org.zstack.header.keyprovider.KmsIdentityVO + org.zstack.header.keyprovider.NkpVO + org.zstack.header.keyprovider.EncryptedResourceKeyRefVO org.zstack.storage.primary.sharedblock.SharedBlockCapacityVO org.zstack.header.vm.devices.VmInstanceResourceMetadataVO org.zstack.header.vm.devices.VmInstanceResourceMetadataArchiveVO diff --git a/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java new file mode 100644 index 00000000000..b37e6a8ce84 --- /dev/null +++ b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java @@ -0,0 +1,78 @@ +package 
org.zstack.header.host; + +import org.zstack.header.vo.EntityGraph; +import org.zstack.header.vo.ForeignKey; + +import javax.persistence.*; +import java.sql.Timestamp; + +@Entity +@Table +@EntityGraph( + parents = { + @EntityGraph.Neighbour(type = HostVO.class, myField = "hostUuid", targetField = "uuid") + } +) +public class HostKeyIdentityVO { + @Id + @Column + @ForeignKey(parentEntityClass = HostEO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + private String hostUuid; + + @Column + private String publicKey; + + @Column + private String fingerprint; + + @Column + private Timestamp createDate; + + @Column + private Timestamp lastOpDate; + + @PreUpdate + private void preUpdate() { + lastOpDate = null; + } + + public String getHostUuid() { + return hostUuid; + } + + public void setHostUuid(String hostUuid) { + this.hostUuid = hostUuid; + } + + public String getPublicKey() { + return publicKey; + } + + public void setPublicKey(String publicKey) { + this.publicKey = publicKey; + } + + public String getFingerprint() { + return fingerprint; + } + + public void setFingerprint(String fingerprint) { + this.fingerprint = fingerprint; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } +} diff --git a/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO_.java b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO_.java new file mode 100644 index 00000000000..a43d01375a0 --- /dev/null +++ b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO_.java @@ -0,0 +1,14 @@ +package org.zstack.header.host; + +import javax.persistence.metamodel.SingularAttribute; +import javax.persistence.metamodel.StaticMetamodel; +import java.sql.Timestamp; + +@StaticMetamodel(HostKeyIdentityVO.class) 
+public class HostKeyIdentityVO_ { + public static volatile SingularAttribute hostUuid; + public static volatile SingularAttribute publicKey; + public static volatile SingularAttribute fingerprint; + public static volatile SingularAttribute createDate; + public static volatile SingularAttribute lastOpDate; +} diff --git a/sdk/src/main/java/SourceClassMap.java b/sdk/src/main/java/SourceClassMap.java index 821f401de47..3829d9077ac 100644 --- a/sdk/src/main/java/SourceClassMap.java +++ b/sdk/src/main/java/SourceClassMap.java @@ -178,6 +178,11 @@ public class SourceClassMap { put("org.zstack.header.image.APIGetUploadImageJobDetailsReply$JobDetails", "org.zstack.sdk.JobDetails"); put("org.zstack.header.image.ImageBackupStorageRefInventory", "org.zstack.sdk.ImageBackupStorageRefInventory"); put("org.zstack.header.image.ImageInventory", "org.zstack.sdk.ImageInventory"); + put("org.zstack.header.keyprovider.KeyProviderInventory", "org.zstack.sdk.KeyProviderInventory"); + put("org.zstack.header.keyprovider.KmsIdentityInventory", "org.zstack.sdk.KmsIdentityInventory"); + put("org.zstack.header.keyprovider.KmsInventory", "org.zstack.sdk.KmsInventory"); + put("org.zstack.header.keyprovider.NkpInventory", "org.zstack.sdk.NkpInventory"); + put("org.zstack.header.keyprovider.NkpRestoreInfo", "org.zstack.sdk.NkpRestoreInfo"); put("org.zstack.header.longjob.LongJobInventory", "org.zstack.sdk.LongJobInventory"); put("org.zstack.header.longjob.LongJobState", "org.zstack.sdk.LongJobState"); put("org.zstack.header.managementnode.ManagementNodeInventory", "org.zstack.sdk.ManagementNodeInventory"); @@ -882,6 +887,9 @@ public class SourceClassMap { put("org.zstack.sdk.KVMCephVolumeTO", "org.zstack.storage.ceph.primary.KVMCephVolumeTO"); put("org.zstack.sdk.KVMHostInventory", "org.zstack.kvm.KVMHostInventory"); put("org.zstack.sdk.KVMIsoTO", "org.zstack.kvm.KVMIsoTO"); + put("org.zstack.sdk.KeyProviderInventory", "org.zstack.header.keyprovider.KeyProviderInventory"); + 
put("org.zstack.sdk.KmsIdentityInventory", "org.zstack.header.keyprovider.KmsIdentityInventory"); + put("org.zstack.sdk.KmsInventory", "org.zstack.header.keyprovider.KmsInventory"); put("org.zstack.sdk.KvmCephCdRomTO", "org.zstack.storage.ceph.primary.KvmCephCdRomTO"); put("org.zstack.sdk.KvmCephIsoTO", "org.zstack.storage.ceph.primary.KvmCephIsoTO"); put("org.zstack.sdk.KvmHostHypervisorMetadataInventory", "org.zstack.kvm.hypervisor.datatype.KvmHostHypervisorMetadataInventory"); @@ -951,6 +959,8 @@ public class SourceClassMap { put("org.zstack.sdk.NetworkServiceL3NetworkRefInventory", "org.zstack.header.network.service.NetworkServiceL3NetworkRefInventory"); put("org.zstack.sdk.NetworkServiceProviderInventory", "org.zstack.header.network.service.NetworkServiceProviderInventory"); put("org.zstack.sdk.NicTO", "org.zstack.kvm.KVMAgentCommands$NicTO"); + put("org.zstack.sdk.NkpInventory", "org.zstack.header.keyprovider.NkpInventory"); + put("org.zstack.sdk.NkpRestoreInfo", "org.zstack.header.keyprovider.NkpRestoreInfo"); put("org.zstack.sdk.NormalIpRangeInventory", "org.zstack.header.network.l3.NormalIpRangeInventory"); put("org.zstack.sdk.NvmeLunHostRefInventory", "org.zstack.storage.device.nvme.NvmeLunHostRefInventory"); put("org.zstack.sdk.NvmeLunInventory", "org.zstack.storage.device.nvme.NvmeLunInventory"); diff --git a/sdk/src/main/java/org/zstack/sdk/KeyProviderInventory.java b/sdk/src/main/java/org/zstack/sdk/KeyProviderInventory.java new file mode 100644 index 00000000000..a2eef45777e --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/KeyProviderInventory.java @@ -0,0 +1,63 @@ +package org.zstack.sdk; + + + +public class KeyProviderInventory { + + public java.lang.String uuid; + public void setUuid(java.lang.String uuid) { + this.uuid = uuid; + } + public java.lang.String getUuid() { + return this.uuid; + } + + public java.lang.String name; + public void setName(java.lang.String name) { + this.name = name; + } + public java.lang.String getName() { + return 
this.name; + } + + public java.lang.String description; + public void setDescription(java.lang.String description) { + this.description = description; + } + public java.lang.String getDescription() { + return this.description; + } + + public java.lang.String type; + public void setType(java.lang.String type) { + this.type = type; + } + public java.lang.String getType() { + return this.type; + } + + public boolean connected; + public void setConnected(boolean connected) { + this.connected = connected; + } + public boolean getConnected() { + return this.connected; + } + + public java.sql.Timestamp createDate; + public void setCreateDate(java.sql.Timestamp createDate) { + this.createDate = createDate; + } + public java.sql.Timestamp getCreateDate() { + return this.createDate; + } + + public java.sql.Timestamp lastOpDate; + public void setLastOpDate(java.sql.Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + public java.sql.Timestamp getLastOpDate() { + return this.lastOpDate; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/KmsIdentityInventory.java b/sdk/src/main/java/org/zstack/sdk/KmsIdentityInventory.java new file mode 100644 index 00000000000..d621aa1d749 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/KmsIdentityInventory.java @@ -0,0 +1,71 @@ +package org.zstack.sdk; + + + +public class KmsIdentityInventory { + + public java.lang.String uuid; + public void setUuid(java.lang.String uuid) { + this.uuid = uuid; + } + public java.lang.String getUuid() { + return this.uuid; + } + + public java.lang.String kmsUuid; + public void setKmsUuid(java.lang.String kmsUuid) { + this.kmsUuid = kmsUuid; + } + public java.lang.String getKmsUuid() { + return this.kmsUuid; + } + + public java.lang.String identityType; + public void setIdentityType(java.lang.String identityType) { + this.identityType = identityType; + } + public java.lang.String getIdentityType() { + return this.identityType; + } + + public java.lang.String clientCertPem; + public void 
setClientCertPem(java.lang.String clientCertPem) { + this.clientCertPem = clientCertPem; + } + public java.lang.String getClientCertPem() { + return this.clientCertPem; + } + + public java.lang.String csrPem; + public void setCsrPem(java.lang.String csrPem) { + this.csrPem = csrPem; + } + public java.lang.String getCsrPem() { + return this.csrPem; + } + + public java.sql.Timestamp certExpiredDate; + public void setCertExpiredDate(java.sql.Timestamp certExpiredDate) { + this.certExpiredDate = certExpiredDate; + } + public java.sql.Timestamp getCertExpiredDate() { + return this.certExpiredDate; + } + + public java.sql.Timestamp createDate; + public void setCreateDate(java.sql.Timestamp createDate) { + this.createDate = createDate; + } + public java.sql.Timestamp getCreateDate() { + return this.createDate; + } + + public java.sql.Timestamp lastOpDate; + public void setLastOpDate(java.sql.Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + public java.sql.Timestamp getLastOpDate() { + return this.lastOpDate; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/KmsInventory.java b/sdk/src/main/java/org/zstack/sdk/KmsInventory.java new file mode 100644 index 00000000000..fb544369699 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/KmsInventory.java @@ -0,0 +1,71 @@ +package org.zstack.sdk; + +import org.zstack.sdk.KmsIdentityInventory; + +public class KmsInventory extends org.zstack.sdk.KeyProviderInventory { + + public java.lang.String endpoint; + public void setEndpoint(java.lang.String endpoint) { + this.endpoint = endpoint; + } + public java.lang.String getEndpoint() { + return this.endpoint; + } + + public java.lang.Integer port; + public void setPort(java.lang.Integer port) { + this.port = port; + } + public java.lang.Integer getPort() { + return this.port; + } + + public java.lang.String kmipVersion; + public void setKmipVersion(java.lang.String kmipVersion) { + this.kmipVersion = kmipVersion; + } + public java.lang.String getKmipVersion() { + 
return this.kmipVersion; + } + + public java.lang.String username; + public void setUsername(java.lang.String username) { + this.username = username; + } + public java.lang.String getUsername() { + return this.username; + } + + public boolean trusted; + public void setTrusted(boolean trusted) { + this.trusted = trusted; + } + public boolean getTrusted() { + return this.trusted; + } + + public java.lang.String activeIdentityUuid; + public void setActiveIdentityUuid(java.lang.String activeIdentityUuid) { + this.activeIdentityUuid = activeIdentityUuid; + } + public java.lang.String getActiveIdentityUuid() { + return this.activeIdentityUuid; + } + + public java.sql.Timestamp serverCertExpiredDate; + public void setServerCertExpiredDate(java.sql.Timestamp serverCertExpiredDate) { + this.serverCertExpiredDate = serverCertExpiredDate; + } + public java.sql.Timestamp getServerCertExpiredDate() { + return this.serverCertExpiredDate; + } + + public KmsIdentityInventory activeIdentity; + public void setActiveIdentity(KmsIdentityInventory activeIdentity) { + this.activeIdentity = activeIdentity; + } + public KmsIdentityInventory getActiveIdentity() { + return this.activeIdentity; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/NkpInventory.java b/sdk/src/main/java/org/zstack/sdk/NkpInventory.java new file mode 100644 index 00000000000..b76e38f3313 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/NkpInventory.java @@ -0,0 +1,39 @@ +package org.zstack.sdk; + + + +public class NkpInventory extends org.zstack.sdk.KeyProviderInventory { + + public java.lang.String kdf; + public void setKdf(java.lang.String kdf) { + this.kdf = kdf; + } + public java.lang.String getKdf() { + return this.kdf; + } + + public java.lang.String saltPolicy; + public void setSaltPolicy(java.lang.String saltPolicy) { + this.saltPolicy = saltPolicy; + } + public java.lang.String getSaltPolicy() { + return this.saltPolicy; + } + + public boolean backedUp; + public void setBackedUp(boolean backedUp) 
{ + this.backedUp = backedUp; + } + public boolean getBackedUp() { + return this.backedUp; + } + + public java.lang.Integer currentVersion; + public void setCurrentVersion(java.lang.Integer currentVersion) { + this.currentVersion = currentVersion; + } + public java.lang.Integer getCurrentVersion() { + return this.currentVersion; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/NkpRestoreInfo.java b/sdk/src/main/java/org/zstack/sdk/NkpRestoreInfo.java new file mode 100644 index 00000000000..ae041ab435b --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/NkpRestoreInfo.java @@ -0,0 +1,63 @@ +package org.zstack.sdk; + + + +public class NkpRestoreInfo { + + public java.lang.String uuid; + public void setUuid(java.lang.String uuid) { + this.uuid = uuid; + } + public java.lang.String getUuid() { + return this.uuid; + } + + public java.lang.String name; + public void setName(java.lang.String name) { + this.name = name; + } + public java.lang.String getName() { + return this.name; + } + + public java.lang.String description; + public void setDescription(java.lang.String description) { + this.description = description; + } + public java.lang.String getDescription() { + return this.description; + } + + public java.lang.String kdf; + public void setKdf(java.lang.String kdf) { + this.kdf = kdf; + } + public java.lang.String getKdf() { + return this.kdf; + } + + public java.lang.String saltPolicy; + public void setSaltPolicy(java.lang.String saltPolicy) { + this.saltPolicy = saltPolicy; + } + public java.lang.String getSaltPolicy() { + return this.saltPolicy; + } + + public java.lang.String encryptedMasterSeed; + public void setEncryptedMasterSeed(java.lang.String encryptedMasterSeed) { + this.encryptedMasterSeed = encryptedMasterSeed; + } + public java.lang.String getEncryptedMasterSeed() { + return this.encryptedMasterSeed; + } + + public java.lang.Integer currentVersion; + public void setCurrentVersion(java.lang.Integer currentVersion) { + this.currentVersion = 
currentVersion; + } + public java.lang.Integer getCurrentVersion() { + return this.currentVersion; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/api/QueryKeyProviderAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/QueryKeyProviderAction.java new file mode 100644 index 00000000000..e3d50dc92c0 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/QueryKeyProviderAction.java @@ -0,0 +1,75 @@ +package org.zstack.sdk.keyprovider.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class QueryKeyProviderAction extends QueryAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.api.QueryKeyProviderResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.api.QueryKeyProviderResult value = res.getResult(org.zstack.sdk.keyprovider.api.QueryKeyProviderResult.class); + ret.value = value == null ? 
new org.zstack.sdk.keyprovider.api.QueryKeyProviderResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "GET"; + info.path = "/key-providers"; + info.needSession = true; + info.needPoll = false; + info.parameterName = ""; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/api/QueryKeyProviderResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/QueryKeyProviderResult.java new file mode 100644 index 00000000000..b331fb5348a --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/QueryKeyProviderResult.java @@ -0,0 +1,22 @@ +package org.zstack.sdk.keyprovider.api; + + + +public class QueryKeyProviderResult { + public java.util.List inventories; + public void setInventories(java.util.List inventories) { + this.inventories = inventories; + } + public java.util.List getInventories() { + return this.inventories; + } + + public java.lang.Long total; + public void setTotal(java.lang.Long total) { + this.total = total; + } + public java.lang.Long getTotal() { + return this.total; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsAction.java new file mode 100644 index 00000000000..2d005f5c3dc --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.api; + +import java.util.HashMap; +import 
java.util.Map; +import org.zstack.sdk.*; + +public class RekeyKeyProviderRefsAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.api.RekeyKeyProviderRefsResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.util.List refIds; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String providerUuid; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.api.RekeyKeyProviderRefsResult value = res.getResult(org.zstack.sdk.keyprovider.api.RekeyKeyProviderRefsResult.class); + ret.value = value == null ? 
new org.zstack.sdk.keyprovider.api.RekeyKeyProviderRefsResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/{providerUuid}/rekey"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "rekeyKeyProviderRefs"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsResult.java new file mode 100644 index 00000000000..ffb0dd51c80 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsResult.java @@ -0,0 +1,7 @@ +package org.zstack.sdk.keyprovider.api; + + + +public class RekeyKeyProviderRefsResult { + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsAction.java new file mode 100644 index 00000000000..827c6d5fa38 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsAction.java @@ -0,0 +1,128 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class CreateKmsAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public 
org.zstack.sdk.keyprovider.kms.api.CreateKmsResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String endpoint; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, numberRange = {1L,65535L}, noTrim = false) + public java.lang.Integer port; + + @Param(required = false, validValues = {"1.0","1.1","1.2","1.3","1.4","2.0","2.1"}, maxLength = 32, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String kmipVersion = "1.2"; + + @Param(required = false, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String username; + + @Param(required = false, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String password; + + @Param(required = true, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String name; + + @Param(required = false, maxLength = 2048, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String description; + + @Param(required = false) + public java.lang.String type; + + @Param(required = false) + public java.lang.String resourceUuid; + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.util.List tagUuids; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + 
@Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.CreateKmsResult value = res.getResult(org.zstack.sdk.keyprovider.kms.api.CreateKmsResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.kms.api.CreateKmsResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "POST"; + info.path = "/key-providers/kms"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "params"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsResult.java new file mode 100644 index 00000000000..ae1e24307a1 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import org.zstack.sdk.KeyProviderInventory; + +public class CreateKmsResult { + public KeyProviderInventory inventory; + public void setInventory(KeyProviderInventory inventory) { + this.inventory = inventory; + } + public KeyProviderInventory getInventory() { + return this.inventory; + } + +} diff --git 
a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/DeleteKmsAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/DeleteKmsAction.java new file mode 100644 index 00000000000..e724f5bb544 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/DeleteKmsAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class DeleteKmsAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.kms.api.DeleteKmsResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String uuid; + + @Param(required = false) + public java.lang.String deleteMode = "Permissive"; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.DeleteKmsResult value = res.getResult(org.zstack.sdk.keyprovider.kms.api.DeleteKmsResult.class); + ret.value = value == null ? 
new org.zstack.sdk.keyprovider.kms.api.DeleteKmsResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "DELETE"; + info.path = "/key-providers/kms/{uuid}"; + info.needSession = true; + info.needPoll = true; + info.parameterName = ""; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/DeleteKmsResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/DeleteKmsResult.java new file mode 100644 index 00000000000..a88e9a3559e --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/DeleteKmsResult.java @@ -0,0 +1,7 @@ +package org.zstack.sdk.keyprovider.kms.api; + + + +public class DeleteKmsResult { + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/QueryKmsAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/QueryKmsAction.java new file mode 100644 index 00000000000..184ed6729d0 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/QueryKmsAction.java @@ -0,0 +1,75 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class QueryKmsAction extends QueryAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.kms.api.QueryKmsResult value; + + public Result throwExceptionIfError() { + 
if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.QueryKmsResult value = res.getResult(org.zstack.sdk.keyprovider.kms.api.QueryKmsResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.kms.api.QueryKmsResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "GET"; + info.path = "/key-providers/kms"; + info.needSession = true; + info.needPoll = false; + info.parameterName = ""; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/QueryKmsResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/QueryKmsResult.java new file mode 100644 index 00000000000..d0d76d2f982 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/QueryKmsResult.java @@ -0,0 +1,22 @@ +package org.zstack.sdk.keyprovider.kms.api; + + + +public class QueryKmsResult { + public java.util.List inventories; + public void setInventories(java.util.List inventories) { + this.inventories = inventories; + } + public java.util.List getInventories() { + return this.inventories; + } + + public java.lang.Long total; + public void setTotal(java.lang.Long total) { + this.total = total; + } + 
public java.lang.Long getTotal() { + return this.total; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UpdateKmsAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UpdateKmsAction.java new file mode 100644 index 00000000000..566dd7087f0 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UpdateKmsAction.java @@ -0,0 +1,119 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class UpdateKmsAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.kms.api.UpdateKmsResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = false, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String endpoint; + + @Param(required = false, nonempty = false, nullElements = false, emptyString = false, numberRange = {1L,65535L}, noTrim = false) + public java.lang.Integer port; + + @Param(required = false, validValues = {"1.0","1.1","1.2","1.3","1.4","2.0","2.1"}, maxLength = 32, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String kmipVersion; + + @Param(required = false, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String username; + + @Param(required = false, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String password; + + @Param(required = true, nonempty = false, nullElements = 
false, emptyString = false, noTrim = false) + public java.lang.String uuid; + + @Param(required = false, maxLength = 2048, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String description; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.UpdateKmsResult value = res.getResult(org.zstack.sdk.keyprovider.kms.api.UpdateKmsResult.class); + ret.value = value == null ? 
new org.zstack.sdk.keyprovider.kms.api.UpdateKmsResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/kms/{uuid}/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "updateKms"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UpdateKmsResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UpdateKmsResult.java new file mode 100644 index 00000000000..8f48d409109 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UpdateKmsResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import org.zstack.sdk.KeyProviderInventory; + +public class UpdateKmsResult { + public KeyProviderInventory inventory; + public void setInventory(KeyProviderInventory inventory) { + this.inventory = inventory; + } + public KeyProviderInventory getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/BackupNkpAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/BackupNkpAction.java new file mode 100644 index 00000000000..e4e996818c1 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/BackupNkpAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class BackupNkpAction extends AbstractAction { + + private static final 
HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.nkp.api.BackupNkpResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String uuid; + + @Param(required = false, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String password; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.nkp.api.BackupNkpResult value = res.getResult(org.zstack.sdk.keyprovider.nkp.api.BackupNkpResult.class); + ret.value = value == null ? 
new org.zstack.sdk.keyprovider.nkp.api.BackupNkpResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/nkp/{uuid}/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "backupNkp"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/BackupNkpResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/BackupNkpResult.java new file mode 100644 index 00000000000..a8880061e50 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/BackupNkpResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.nkp.api; + + + +public class BackupNkpResult { + public java.lang.String content; + public void setContent(java.lang.String content) { + this.content = content; + } + public java.lang.String getContent() { + return this.content; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpAction.java new file mode 100644 index 00000000000..45ca9d824c9 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpAction.java @@ -0,0 +1,119 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class CreateNkpAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap 
nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.nkp.api.CreateNkpResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = false, validValues = {"HKDF-SHA256"}, maxLength = 64, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String kdf = "HKDF-SHA256"; + + @Param(required = false, validValues = {"providerName"}, maxLength = 64, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String saltPolicy = "providerName"; + + @Param(required = true, maxLength = 255, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String name; + + @Param(required = false, maxLength = 2048, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String description; + + @Param(required = false) + public java.lang.String type; + + @Param(required = false) + public java.lang.String resourceUuid; + + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.util.List tagUuids; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } 
+ + org.zstack.sdk.keyprovider.nkp.api.CreateNkpResult value = res.getResult(org.zstack.sdk.keyprovider.nkp.api.CreateNkpResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.nkp.api.CreateNkpResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "POST"; + info.path = "/key-providers/nkp"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "params"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpResult.java new file mode 100644 index 00000000000..e00c3112235 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import org.zstack.sdk.KeyProviderInventory; + +public class CreateNkpResult { + public KeyProviderInventory inventory; + public void setInventory(KeyProviderInventory inventory) { + this.inventory = inventory; + } + public KeyProviderInventory getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/DeleteNkpAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/DeleteNkpAction.java new file mode 100644 index 00000000000..7e7b433eaf6 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/DeleteNkpAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import 
java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class DeleteNkpAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.nkp.api.DeleteNkpResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String uuid; + + @Param(required = false) + public java.lang.String deleteMode = "Permissive"; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.nkp.api.DeleteNkpResult value = res.getResult(org.zstack.sdk.keyprovider.nkp.api.DeleteNkpResult.class); + ret.value = value == null ? 
new org.zstack.sdk.keyprovider.nkp.api.DeleteNkpResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "DELETE"; + info.path = "/key-providers/nkp/{uuid}"; + info.needSession = true; + info.needPoll = true; + info.parameterName = ""; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/DeleteNkpResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/DeleteNkpResult.java new file mode 100644 index 00000000000..3dbd48e495b --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/DeleteNkpResult.java @@ -0,0 +1,7 @@ +package org.zstack.sdk.keyprovider.nkp.api; + + + +public class DeleteNkpResult { + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreAction.java new file mode 100644 index 00000000000..db7265e783f --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class ParseNkpRestoreAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.nkp.api.ParseNkpRestoreResult value; + + 
public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String contentBase64; + + @Param(required = false, maxLength = 255, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String password; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.nkp.api.ParseNkpRestoreResult value = res.getResult(org.zstack.sdk.keyprovider.nkp.api.ParseNkpRestoreResult.class); + ret.value = value == null ? 
new org.zstack.sdk.keyprovider.nkp.api.ParseNkpRestoreResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/nkp/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "parseNkpRestore"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreResult.java new file mode 100644 index 00000000000..2fee861ac11 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import org.zstack.sdk.NkpRestoreInfo; + +public class ParseNkpRestoreResult { + public NkpRestoreInfo restoreInfo; + public void setRestoreInfo(NkpRestoreInfo restoreInfo) { + this.restoreInfo = restoreInfo; + } + public NkpRestoreInfo getRestoreInfo() { + return this.restoreInfo; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/QueryNkpAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/QueryNkpAction.java new file mode 100644 index 00000000000..6eb869c7821 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/QueryNkpAction.java @@ -0,0 +1,75 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class QueryNkpAction extends QueryAction { + + private static 
final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.nkp.api.QueryNkpResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.nkp.api.QueryNkpResult value = res.getResult(org.zstack.sdk.keyprovider.nkp.api.QueryNkpResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.nkp.api.QueryNkpResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "GET"; + info.path = "/key-providers/nkp"; + info.needSession = true; + info.needPoll = false; + info.parameterName = ""; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/QueryNkpResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/QueryNkpResult.java new file mode 100644 index 00000000000..3a1b8f44abd --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/QueryNkpResult.java @@ -0,0 +1,22 @@ +package org.zstack.sdk.keyprovider.nkp.api; + + + +public class QueryNkpResult { + public java.util.List 
inventories; + public void setInventories(java.util.List inventories) { + this.inventories = inventories; + } + public java.util.List getInventories() { + return this.inventories; + } + + public java.lang.Long total; + public void setTotal(java.lang.Long total) { + this.total = total; + } + public java.lang.Long getTotal() { + return this.total; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/RestoreNkpAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/RestoreNkpAction.java new file mode 100644 index 00000000000..f7c44403fe8 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/RestoreNkpAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class RestoreNkpAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.nkp.api.RestoreNkpResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String contentBase64; + + @Param(required = false, maxLength = 255, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String password; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = 
false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.nkp.api.RestoreNkpResult value = res.getResult(org.zstack.sdk.keyprovider.nkp.api.RestoreNkpResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.nkp.api.RestoreNkpResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/nkp/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "restoreNkp"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/RestoreNkpResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/RestoreNkpResult.java new file mode 100644 index 00000000000..812f84e8fa9 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/RestoreNkpResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import org.zstack.sdk.NkpInventory; + +public class RestoreNkpResult { + public NkpInventory inventory; + public void setInventory(NkpInventory inventory) { + this.inventory = inventory; + } + public NkpInventory getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/UpdateNkpAction.java 
b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/UpdateNkpAction.java new file mode 100644 index 00000000000..3020624f823 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/UpdateNkpAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class UpdateNkpAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.nkp.api.UpdateNkpResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String uuid; + + @Param(required = false, maxLength = 2048, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public java.lang.String description; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.nkp.api.UpdateNkpResult value = res.getResult(org.zstack.sdk.keyprovider.nkp.api.UpdateNkpResult.class); + ret.value = value == null ? 
new org.zstack.sdk.keyprovider.nkp.api.UpdateNkpResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/nkp/{uuid}/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "updateNkp"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/UpdateNkpResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/UpdateNkpResult.java new file mode 100644 index 00000000000..b7302e2c291 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/UpdateNkpResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.nkp.api; + +import org.zstack.sdk.KeyProviderInventory; + +public class UpdateNkpResult { + public KeyProviderInventory inventory; + public void setInventory(KeyProviderInventory inventory) { + this.inventory = inventory; + } + public KeyProviderInventory getInventory() { + return this.inventory; + } + +} diff --git a/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy b/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy index 51b3f4811da..cfd879afb45 100644 --- a/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy +++ b/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy @@ -36498,6 +36498,363 @@ abstract class ApiHelper { } + def queryKeyProvider(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.api.QueryKeyProviderAction.class) Closure c) { + def a = new 
org.zstack.sdk.keyprovider.api.QueryKeyProviderAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + a.conditions = a.conditions.collect { it.toString() } + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def rekeyKeyProviderRefs(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.api.RekeyKeyProviderRefsAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.api.RekeyKeyProviderRefsAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def createKms(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.CreateKmsAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.CreateKmsAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + 
def deleteKms(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.DeleteKmsAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.DeleteKmsAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def queryKms(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.QueryKmsAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.QueryKmsAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + a.conditions = a.conditions.collect { it.toString() } + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def updateKms(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.UpdateKmsAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.UpdateKmsAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + 
Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def backupNkp(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.nkp.api.BackupNkpAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.nkp.api.BackupNkpAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def createNkp(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.nkp.api.CreateNkpAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.nkp.api.CreateNkpAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def deleteNkp(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.nkp.api.DeleteNkpAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.nkp.api.DeleteNkpAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = 
errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def parseNkpRestore(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.nkp.api.ParseNkpRestoreAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.nkp.api.ParseNkpRestoreAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def queryNkp(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.nkp.api.QueryNkpAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.nkp.api.QueryNkpAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + a.conditions = a.conditions.collect { it.toString() } + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def restoreNkp(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.nkp.api.RestoreNkpAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.nkp.api.RestoreNkpAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if 
(System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def updateNkp(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.nkp.api.UpdateNkpAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.nkp.api.UpdateNkpAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + def getManagementNodesStatus(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.managements.common.GetManagementNodesStatusAction.class) Closure c) { def a = new org.zstack.sdk.managements.common.GetManagementNodesStatusAction() a.sessionId = Test.currentEnvSpec?.session?.uuid From e220fc56fe47b6110f3926ec7b7442ccd9e7c92b Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Wed, 11 Feb 2026 15:35:53 +0800 Subject: [PATCH 26/76] [storage]: improve error message in VolumeSnapshotApiInterceptor This patch is for zsv_5.0.0 Related: ZSV-11338 Change-Id: I666c7763796f7070796762767669786e6e75736b --- .../snapshot/VolumeSnapshotConstant.java | 12 ++++++++++ .../VolumeSnapshotApiInterceptor.java | 22 ++++++++++--------- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/header/src/main/java/org/zstack/header/storage/snapshot/VolumeSnapshotConstant.java 
b/header/src/main/java/org/zstack/header/storage/snapshot/VolumeSnapshotConstant.java index 8f784a72051..adb5c5ea436 100755 --- a/header/src/main/java/org/zstack/header/storage/snapshot/VolumeSnapshotConstant.java +++ b/header/src/main/java/org/zstack/header/storage/snapshot/VolumeSnapshotConstant.java @@ -1,5 +1,10 @@ package org.zstack.header.storage.snapshot; +import org.zstack.header.vm.VmInstanceState; + +import java.util.Arrays; +import java.util.List; + /** */ public interface VolumeSnapshotConstant { @@ -16,4 +21,11 @@ public interface VolumeSnapshotConstant { String VOLUME_SNAPSHOT_STRUCT = "VolumeSnapshotStruct"; String NEED_TAKE_SNAPSHOTS_ON_HYPERVISOR = "needTakeSnapshotOnHypervisor"; String NEED_BLOCK_STREAM_ON_HYPERVISOR = "needBlockStreamOnHypervisor"; + + public static final List ALLOW_TAKE_SNAPSHOTS_VM_STATES = Arrays.asList( + VmInstanceState.Running, VmInstanceState.Stopped, VmInstanceState.Paused + ); + public static final List ALLOW_TAKE_MEMORY_SNAPSHOTS_VM_STATES = Arrays.asList( + VmInstanceState.Stopped + ); } diff --git a/storage/src/main/java/org/zstack/storage/snapshot/VolumeSnapshotApiInterceptor.java b/storage/src/main/java/org/zstack/storage/snapshot/VolumeSnapshotApiInterceptor.java index 1da009fc2d9..417fdb4c390 100755 --- a/storage/src/main/java/org/zstack/storage/snapshot/VolumeSnapshotApiInterceptor.java +++ b/storage/src/main/java/org/zstack/storage/snapshot/VolumeSnapshotApiInterceptor.java @@ -5,7 +5,6 @@ import org.zstack.core.componentloader.PluginRegistry; import org.zstack.core.db.DatabaseFacade; import org.zstack.core.db.Q; -import org.zstack.core.db.SQL; import org.zstack.core.db.SimpleQuery; import org.zstack.core.db.SimpleQuery.Op; import org.zstack.core.errorcode.ErrorFacade; @@ -16,7 +15,6 @@ import org.zstack.header.message.APIMessage; import org.zstack.header.storage.snapshot.*; import org.zstack.header.storage.snapshot.group.*; -import org.zstack.header.vm.VmInstanceInventory; import 
org.zstack.header.vm.VmInstanceState; import org.zstack.header.vm.VmInstanceVO; import org.zstack.header.vm.VmInstanceVO_; @@ -29,7 +27,6 @@ import static org.zstack.storage.snapshot.VolumeSnapshotMessageRouter.getResourceIdToRouteMsg; import javax.persistence.Tuple; -import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; @@ -113,13 +110,18 @@ private void validate(APIRevertVmFromSnapshotGroupMsg msg) { } } - if (isWithMemoryForSnapshotGroup(group) - && Q.New(VmInstanceVO.class) - .eq(VmInstanceVO_.uuid, group.getVmInstanceUuid()) - .in(VmInstanceVO_.state, Arrays.asList(VmInstanceState.Running, VmInstanceState.Paused)) - .isExists()) { - throw new ApiMessageInterceptionException(argerr("Can not take memory snapshot, expected vm states are [%s, %s]", - VmInstanceState.Running.toString(), VmInstanceState.Paused.toString())); + if (isWithMemoryForSnapshotGroup(group)) { + VmInstanceState vmState = Q.New(VmInstanceVO.class) + .select(VmInstanceVO_.state) + .eq(VmInstanceVO_.uuid, group.getVmInstanceUuid()) + .findValue(); + if (!VolumeSnapshotConstant.ALLOW_TAKE_MEMORY_SNAPSHOTS_VM_STATES.contains(vmState)) { + throw new ApiMessageInterceptionException(argerr( + "Can not revert VM with memory snapshot: unexpected VM state") + .withOpaque("vm.uuid", group.getVmInstanceUuid()) + .withOpaque("vm.state", vmState.toString()) + .withOpaque("expect.states", VolumeSnapshotConstant.ALLOW_TAKE_MEMORY_SNAPSHOTS_VM_STATES)); + } } msg.setVmInstanceUuid(group.getVmInstanceUuid()); From fba75d41f37f5401c3c4fc1a5ae891f4e59b797f Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Wed, 11 Feb 2026 18:55:48 +0800 Subject: [PATCH 27/76] [kvm]: update KVMGlobalConfig default value to "None" Resolves: ZSV-11340 Related: ZSV-11010 Change-Id: I63646f6264697877707967746f69756e776f6c71 --- plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java index 4a724abac2a..1b6e5bce8aa 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java @@ -160,8 +160,8 @@ public class KVMGlobalConfig { ) public static GlobalConfig KVMAGENT_PHYSICAL_MEMORY_USAGE_HARD_LIMIT = new GlobalConfig(CATEGORY, "kvmagent.physicalmemory.usage.hardlimit"); - @GlobalConfigDef(defaultValue = "", - description = "Specify the EDK version to be used for the next VM startup. Default empty string indicates the use of the system's default EDK version") + @GlobalConfigDef(defaultValue = "None", + description = "Specify the EDK version to be used for the next VM startup. None indicates the use of the system's default EDK version") @BindResourceConfig(value = {VmInstanceVO.class, ClusterVO.class}) public static GlobalConfig VM_EDK_VERSION_CONFIG = new GlobalConfig(CATEGORY, "vm.edk.version"); } From 0cc0e4298fe5cd2fd3abf1559555ae072a2f5823 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 12 Feb 2026 14:40:34 +0800 Subject: [PATCH 28/76] [kvm]: continue to update KVMGlobalConfig default value to "None" Resolves: ZSV-11340 Related: ZSV-11010 Change-Id: I63646f6264697877707967746f69756e776f6c73 --- plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java | 2 ++ .../kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java | 4 +++- .../java/org/zstack/kvm/efi/KvmSecureBootExtensions.java | 6 +++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java index 7cd78c36c93..cb79da59838 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java @@ -183,6 +183,8 @@ public interface KVMConstant { public static final String L2_PROVIDER_TYPE_OVS_DPDK = 
"OvsDpdk"; public static final String L2_PROVIDER_TYPE_MACVLAN = "MacVlan"; + public static final String EDK_VERSION_NONE = "None"; + public static final String DHCP_BIN_FILE_PATH = "/usr/local/zstack/dnsmasq"; enum KvmVmState { diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java index 1b6e5bce8aa..96caaa0e5b6 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMGlobalConfig.java @@ -11,6 +11,8 @@ import org.zstack.header.host.HostVO; import org.zstack.header.zone.ZoneVO; +import static org.zstack.kvm.KVMConstant.EDK_VERSION_NONE; + /** */ @GlobalConfigDefinition @@ -160,7 +162,7 @@ public class KVMGlobalConfig { ) public static GlobalConfig KVMAGENT_PHYSICAL_MEMORY_USAGE_HARD_LIMIT = new GlobalConfig(CATEGORY, "kvmagent.physicalmemory.usage.hardlimit"); - @GlobalConfigDef(defaultValue = "None", + @GlobalConfigDef(defaultValue = EDK_VERSION_NONE, description = "Specify the EDK version to be used for the next VM startup. 
None indicates the use of the system's default EDK version") @BindResourceConfig(value = {VmInstanceVO.class, ClusterVO.class}) public static GlobalConfig VM_EDK_VERSION_CONFIG = new GlobalConfig(CATEGORY, "vm.edk.version"); diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java index 1ff53a531bc..138fbda2d6d 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java @@ -14,6 +14,10 @@ import org.zstack.utils.Utils; import org.zstack.utils.logging.CLogger; +import java.util.Objects; + +import static org.zstack.kvm.KVMConstant.EDK_VERSION_NONE; + public class KvmSecureBootExtensions implements KVMStartVmExtensionPoint { private static final CLogger logger = Utils.getLogger(KvmSecureBootExtensions.class); @@ -32,7 +36,7 @@ public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAg resourceConfig = resourceConfigFacade.getResourceConfig(KVMGlobalConfig.VM_EDK_VERSION_CONFIG.getIdentity()); final String edkVersion = resourceConfig.getResourceConfigValue(spec.getVmInventory().getUuid(), String.class); - if (edkVersion != null && !edkVersion.isEmpty()) { + if (!Objects.equals(edkVersion, EDK_VERSION_NONE)) { cmd.setEdkVersion(edkVersion); } } From 176a88c007a8753101b0b1988396ca0a7b2448ee Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Tue, 24 Feb 2026 16:15:59 +0800 Subject: [PATCH 29/76] [conf]: update default value of secure.boot global config to false In previous versions, the global config "enable.uefi.secure.boot" had no practical effect, as its default value was "true". In the current version, since it affects numerous configurations, its default value has been changed to "false" to maintain consistency with VM startup settings from previous versions and minimize the risk of VM configuration changes. 
Related: ZSV-11310 Change-Id: I616c796e6e796a726b6f7a77656975717a65726c --- conf/globalConfig/vm.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/globalConfig/vm.xml b/conf/globalConfig/vm.xml index 8563169b335..2542bc2cb03 100755 --- a/conf/globalConfig/vm.xml +++ b/conf/globalConfig/vm.xml @@ -289,7 +289,7 @@ enable.uefi.secure.boot enable uefi secure boot - true + false vm java.lang.Boolean From 5406edf6587555c763221789d38969fb8fc4ab04 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Fri, 13 Feb 2026 14:59:35 +0800 Subject: [PATCH 30/76] [compute]: introduce NvRam type volume If TPM or secure boot is enabled, MN will prepare NvRam volume and instantiate volume to host. Add delete extension to delete NvRam volume after related VM deleted. Resolves: ZSV-11310 Related: ZSPHER-1 Change-Id: I787672786e6873696c6273647778737364767563 --- .../zstack/compute/vm/VmDeleteVolumeFlow.java | 11 +- .../compute/vm/VmExpungeNvRamVolumeFlow.java | 83 +++++ .../org/zstack/compute/vm/VmInstanceBase.java | 2 +- .../compute/vm/devices/VmTpmExtensions.java | 21 +- .../compute/vm/devices/VmTpmManager.java | 37 +++ conf/persistence.xml | 2 + conf/springConfigXml/Kvm.xml | 1 + conf/springConfigXml/VmInstanceManager.xml | 2 + .../zstack/header/vm/VmInstanceConstant.java | 3 + .../org/zstack/header/vm/VmInstanceSpec.java | 9 + .../org/zstack/header/volume/VolumeType.java | 3 +- .../java/org/zstack/kvm/KVMAgentCommands.java | 9 + .../kvm/efi/KvmSecureBootExtensions.java | 289 +++++++++++++++++- .../primary/local/LocalStorageKvmBackend.java | 6 + .../primary/PrimaryStoragePathMaker.java | 4 + .../org/zstack/storage/volume/VolumeBase.java | 5 +- .../test/resources/springConfigXml/Kvm.xml | 1 + 17 files changed, 469 insertions(+), 19 deletions(-) create mode 100644 compute/src/main/java/org/zstack/compute/vm/VmExpungeNvRamVolumeFlow.java diff --git a/compute/src/main/java/org/zstack/compute/vm/VmDeleteVolumeFlow.java 
b/compute/src/main/java/org/zstack/compute/vm/VmDeleteVolumeFlow.java index b791ecb1658..a9dac33e70f 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmDeleteVolumeFlow.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmDeleteVolumeFlow.java @@ -49,7 +49,9 @@ public void run(final FlowTrigger trigger, Map data) { final boolean templated = isTemplated(spec.getVmInventory().getUuid()); /* data volume must be detached anyway no matter if it is going to be deleted */ - if (spec.getVmInventory().getAllVolumes().size() > 1) { + boolean anyDataVolume = spec.getVmInventory().getAllVolumes().stream() + .anyMatch(arg -> VolumeType.Data.toString().equals(arg.getType())); + if (anyDataVolume) { detachDataVolumes(spec); } @@ -60,7 +62,12 @@ public void run(final FlowTrigger trigger, Map data) { return; } - List volumeTypes = Arrays.asList(VolumeType.Root.toString(), VolumeType.Memory.toString(), VolumeType.Cache.toString()); + List volumeTypes = Arrays.asList( + VolumeType.Root.toString(), + VolumeType.Memory.toString(), + VolumeType.Cache.toString(), + VolumeType.NvRam.toString() + ); List ctx = transformAndRemoveNull(spec.getVmInventory().getAllVolumes(), arg -> { if (VolumeType.Data.toString().equals(arg.getType()) && !deleteDataDisk && !templated) { return null; diff --git a/compute/src/main/java/org/zstack/compute/vm/VmExpungeNvRamVolumeFlow.java b/compute/src/main/java/org/zstack/compute/vm/VmExpungeNvRamVolumeFlow.java new file mode 100644 index 00000000000..9b608817507 --- /dev/null +++ b/compute/src/main/java/org/zstack/compute/vm/VmExpungeNvRamVolumeFlow.java @@ -0,0 +1,83 @@ +package org.zstack.compute.vm; + +import org.springframework.beans.factory.annotation.Autowire; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Configurable; +import org.zstack.core.asyncbatch.While; +import org.zstack.core.cloudbus.CloudBus; +import org.zstack.core.cloudbus.CloudBusCallBack; +import 
org.zstack.core.db.Q; +import org.zstack.header.core.WhileDoneCompletion; +import org.zstack.header.core.workflow.FlowTrigger; +import org.zstack.header.core.workflow.NoRollbackFlow; +import org.zstack.header.errorcode.ErrorCodeList; +import org.zstack.header.message.MessageReply; +import org.zstack.header.vm.VmInstanceConstant; +import org.zstack.header.vm.VmInstanceSpec; +import org.zstack.header.volume.ExpungeVolumeMsg; +import org.zstack.header.volume.VolumeConstant; +import org.zstack.header.volume.VolumeType; +import org.zstack.header.volume.VolumeVO; +import org.zstack.header.volume.VolumeVO_; +import org.zstack.utils.Utils; +import org.zstack.utils.logging.CLogger; + +import java.util.List; +import java.util.Map; + +import static org.zstack.core.Platform.multiErr; + +@Configurable(preConstruction = true, autowire = Autowire.BY_TYPE) +public class VmExpungeNvRamVolumeFlow extends NoRollbackFlow { + private static final CLogger logger = Utils.getLogger(VmExpungeNvRamVolumeFlow.class); + + @Autowired + protected CloudBus bus; + + @Override + @SuppressWarnings("rawtypes") + public void run(FlowTrigger trigger, Map data) { + final VmInstanceSpec spec = (VmInstanceSpec) data.get(VmInstanceConstant.Params.VmInstanceSpec.toString()); + String vmUuid = spec.getVmInventory().getUuid(); + + List volumes = Q.New(VolumeVO.class) + .eq(VolumeVO_.vmInstanceUuid, vmUuid) + .eq(VolumeVO_.type, VolumeType.NvRam) + .list(); + if (volumes.isEmpty()) { + trigger.next(); + return; + } + + new While<>(volumes).each((vol, c) -> { + ExpungeVolumeMsg msg = new ExpungeVolumeMsg(); + msg.setVolumeUuid(vol.getUuid()); + bus.makeTargetServiceIdByResourceUuid(msg, VolumeConstant.SERVICE_ID, vol.getUuid()); + bus.send(msg, new CloudBusCallBack(c) { + @Override + public void run(MessageReply reply) { + if (!reply.isSuccess()) { + logger.warn(String.format("failed to expunge the NvRam volume[uuid:%s] of the vm[uuid:%s, name:%s]: %s", + vol.getUuid(), spec.getVmInventory().getUuid(), + 
spec.getVmInventory().getName(), reply.getError())); + + c.addError(reply.getError() + .withOpaque("volume.uuid", vol.getUuid())); + } + + c.done(); + } + }); + }).run(new WhileDoneCompletion(trigger) { + @Override + public void done(ErrorCodeList errorCodeList) { + if (!errorCodeList.getCauses().isEmpty()) { + trigger.fail(multiErr(errorCodeList.getCauses(), "failed to expunge the NvRam volumes")); + return; + } + + trigger.next(); + } + }); + } +} diff --git a/compute/src/main/java/org/zstack/compute/vm/VmInstanceBase.java b/compute/src/main/java/org/zstack/compute/vm/VmInstanceBase.java index 9ad123a6ef1..49b076a2fc6 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmInstanceBase.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmInstanceBase.java @@ -2593,7 +2593,7 @@ protected void scripts() { sql(VmNicVO.class).eq(VmNicVO_.vmInstanceUuid, self.getUuid()).hardDelete(); sql(VolumeVO.class).eq(VolumeVO_.vmInstanceUuid, self.getUuid()) - .eq(VolumeVO_.type, VolumeType.Root) + .in(VolumeVO_.type, list(VolumeType.Root, VolumeType.NvRam)) .hardDelete(); sql(VmCdRomVO.class).eq(VmCdRomVO_.vmInstanceUuid, self.getUuid()).hardDelete(); sql(VmInstanceVO.class).eq(VmInstanceVO_.uuid, self.getUuid()).hardDelete(); diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java index 4b25befa82f..6877b2680fc 100644 --- a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java @@ -1,12 +1,18 @@ package org.zstack.compute.vm.devices; import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.compute.vm.BuildVmSpecExtensionPoint; import org.zstack.header.vm.CreateVmInstanceMsg; +import org.zstack.header.vm.DiskAO; import org.zstack.header.vm.VmInstanceCreateExtensionPoint; +import org.zstack.header.vm.VmInstanceSpec; import 
org.zstack.header.vm.VmInstanceVO; import org.zstack.header.vm.devices.VmDevicesSpec; -public class VmTpmExtensions implements VmInstanceCreateExtensionPoint { +import static org.zstack.header.vm.VmInstanceConstant.NV_RAM_DEFAULT_SIZE; + +public class VmTpmExtensions implements VmInstanceCreateExtensionPoint, + BuildVmSpecExtensionPoint { @Autowired private VmTpmManager vmTpmManager; @@ -24,4 +30,17 @@ public void afterPersistVmInstanceVO(VmInstanceVO vo, CreateVmInstanceMsg msg) { vmTpmManager.persistTpmVO(null, vo.getUuid()); } + + @Override + public void afterBuildVmSpec(VmInstanceSpec spec) { + String vmUuid = spec.getVmInventory().getUuid(); + if (!vmTpmManager.needRegisterNvRam(vmUuid)) { + return; + } + + DiskAO nvRamSpec = new DiskAO(); + nvRamSpec.setSize(NV_RAM_DEFAULT_SIZE); + nvRamSpec.setName("NvRam-of-VM-" + vmUuid); + spec.setNvRamSpec(nvRamSpec); + } } diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java index a37e57d8426..ced08da2918 100644 --- a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java @@ -1,17 +1,29 @@ package org.zstack.compute.vm.devices; import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.compute.vm.VmSystemTags; import org.zstack.core.Platform; import org.zstack.core.db.DatabaseFacade; +import org.zstack.core.db.Q; +import org.zstack.header.image.ImageBootMode; import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.tpm.entity.TpmVO_; +import org.zstack.resourceconfig.ResourceConfig; +import org.zstack.resourceconfig.ResourceConfigFacade; import org.zstack.utils.Utils; import org.zstack.utils.logging.CLogger; +import java.util.Objects; + +import static org.zstack.compute.vm.VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT; + public class VmTpmManager { private static final CLogger logger = 
Utils.getLogger(VmTpmManager.class); @Autowired private DatabaseFacade databaseFacade; + @Autowired + private ResourceConfigFacade resourceConfigFacade; public TpmVO persistTpmVO(String tpmUuid, String vmUuid) { if (tpmUuid == null) { @@ -26,4 +38,29 @@ public TpmVO persistTpmVO(String tpmUuid, String vmUuid) { logger.debug("Persisted TpmVO for VM " + vmUuid + " with uuid=" + tpm.getUuid()); return tpm; } + + public boolean needRegisterNvRam(String vmUuid) { + boolean tpmExists = Q.New(TpmVO.class) + .eq(TpmVO_.vmInstanceUuid, vmUuid) + .isExists(); + if (tpmExists) { + return true; + } + + String bootMode = VmSystemTags.BOOT_MODE.getTokenByResourceUuid(vmUuid, VmSystemTags.BOOT_MODE_TOKEN); + if (!isUefiBootMode(bootMode)) { + return false; + } + + ResourceConfig resourceConfig = resourceConfigFacade.getResourceConfig(ENABLE_UEFI_SECURE_BOOT.getIdentity()); + return resourceConfig.getResourceConfigValue(vmUuid, Boolean.class) == Boolean.TRUE; + } + + /** + * @param bootMode boot mode, null is Legacy + */ + public static boolean isUefiBootMode(String bootMode) { + return Objects.equals(bootMode, ImageBootMode.UEFI.toString()) + || Objects.equals(bootMode, ImageBootMode.UEFI_WITH_CSM.toString()); + } } diff --git a/conf/persistence.xml b/conf/persistence.xml index ff82022f554..5a1b855d9e8 100755 --- a/conf/persistence.xml +++ b/conf/persistence.xml @@ -18,6 +18,8 @@ org.zstack.resourceconfig.ResourceConfigVO org.zstack.header.managementnode.ManagementNodeVO org.zstack.header.managementnode.ManagementNodeContextVO + org.zstack.header.tpm.entity.TpmHostRefVO + org.zstack.header.tpm.entity.TpmVO org.zstack.header.zone.ZoneVO org.zstack.header.zone.ZoneEO org.zstack.header.cluster.ClusterVO diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index 9536f559eb3..16cd80fadf9 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -261,6 +261,7 @@ + diff --git a/conf/springConfigXml/VmInstanceManager.xml 
b/conf/springConfigXml/VmInstanceManager.xml index 6786f86c780..0e82353d042 100755 --- a/conf/springConfigXml/VmInstanceManager.xml +++ b/conf/springConfigXml/VmInstanceManager.xml @@ -116,6 +116,7 @@ org.zstack.compute.vm.VmExpungeRootVolumeFlow + org.zstack.compute.vm.VmExpungeNvRamVolumeFlow org.zstack.compute.vm.VmExpungeMemoryVolumeFlow org.zstack.compute.vm.VmExpungeCacheVolumeFlow @@ -285,6 +286,7 @@ + diff --git a/header/src/main/java/org/zstack/header/vm/VmInstanceConstant.java b/header/src/main/java/org/zstack/header/vm/VmInstanceConstant.java index 9d0efdd77f1..e767df877f1 100755 --- a/header/src/main/java/org/zstack/header/vm/VmInstanceConstant.java +++ b/header/src/main/java/org/zstack/header/vm/VmInstanceConstant.java @@ -1,6 +1,7 @@ package org.zstack.header.vm; import org.zstack.header.configuration.PythonClass; +import org.zstack.utils.data.SizeUnit; @PythonClass public interface VmInstanceConstant { @@ -25,6 +26,8 @@ public interface VmInstanceConstant { String SHUTDOWN_DETAIL_BY_GUEST = "by guest"; String SHUTDOWN_DETAIL_FINISHED = "finished"; + long NV_RAM_DEFAULT_SIZE = SizeUnit.MEGABYTE.toByte(1); + enum Params { VmInstanceSpec, AttachingVolumeInventory, diff --git a/header/src/main/java/org/zstack/header/vm/VmInstanceSpec.java b/header/src/main/java/org/zstack/header/vm/VmInstanceSpec.java index 18620a65fad..95ccdc22bf2 100755 --- a/header/src/main/java/org/zstack/header/vm/VmInstanceSpec.java +++ b/header/src/main/java/org/zstack/header/vm/VmInstanceSpec.java @@ -405,6 +405,7 @@ public void setCandidatePrimaryStorageUuidsForDataVolume(List candidateP private List deprecatedDisksSpecs = new ArrayList<>(); private VmCustomSpecificationStruct vmCustomSpecification; private VmDevicesSpec devicesSpec; + private DiskAO nvRamSpec; public DiskAO getRootDisk() { return rootDisk; @@ -446,6 +447,14 @@ public void setDevicesSpec(VmDevicesSpec devicesSpec) { this.devicesSpec = devicesSpec; } + public DiskAO getNvRamSpec() { + return nvRamSpec; + } + + 
public void setNvRamSpec(DiskAO nvRamSpec) { + this.nvRamSpec = nvRamSpec; + } + public boolean isSkipIpAllocation() { return skipIpAllocation; } diff --git a/header/src/main/java/org/zstack/header/volume/VolumeType.java b/header/src/main/java/org/zstack/header/volume/VolumeType.java index e373e5aef98..4266a51ccec 100755 --- a/header/src/main/java/org/zstack/header/volume/VolumeType.java +++ b/header/src/main/java/org/zstack/header/volume/VolumeType.java @@ -4,5 +4,6 @@ public enum VolumeType { Root, Data, Memory, - Cache + Cache, + NvRam, } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index 6ec7e3429b7..16865309ec0 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -2068,6 +2068,7 @@ public static class StartVmCmd extends vdiCmd implements VmAddOnsCmd { private List cdRoms = new ArrayList<>(); private List dataVolumes; private List cacheVolumes; + private VolumeTO nvRam; private List nics; private TpmTO tpm; private long timeout; @@ -2549,6 +2550,14 @@ public void setCacheVolumes(List cacheVolumes) { this.cacheVolumes = cacheVolumes; } + public VolumeTO getNvRam() { + return nvRam; + } + + public void setNvRam(VolumeTO nvRam) { + this.nvRam = nvRam; + } + public List getNics() { return nics; } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java index 138fbda2d6d..8f2e33af8ff 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java @@ -2,43 +2,97 @@ import org.springframework.beans.factory.annotation.Autowired; import org.zstack.compute.vm.VmGlobalConfig; +import org.zstack.compute.vm.devices.VmTpmManager; +import org.zstack.core.cloudbus.CloudBus; +import 
org.zstack.core.cloudbus.CloudBusCallBack; +import org.zstack.core.db.Q; +import org.zstack.core.workflow.SimpleFlowChain; +import org.zstack.header.core.Completion; +import org.zstack.header.core.workflow.Flow; +import org.zstack.header.core.workflow.FlowDoneHandler; +import org.zstack.header.core.workflow.FlowErrorHandler; +import org.zstack.header.core.workflow.FlowRollback; +import org.zstack.header.core.workflow.FlowTrigger; +import org.zstack.header.core.workflow.NoRollbackFlow; import org.zstack.header.errorcode.ErrorCode; -import org.zstack.header.image.ImageBootMode; +import org.zstack.header.exception.CloudRuntimeException; +import org.zstack.header.identity.AccountResourceRefVO; +import org.zstack.header.identity.AccountResourceRefVO_; +import org.zstack.header.message.MessageReply; +import org.zstack.header.vm.DiskAO; +import org.zstack.header.vm.PreVmInstantiateResourceExtensionPoint; import org.zstack.header.vm.VmInstanceSpec; +import org.zstack.header.vm.VmInstantiateResourceException; +import org.zstack.header.volume.CreateVolumeMsg; +import org.zstack.header.volume.CreateVolumeReply; +import org.zstack.header.volume.DeleteVolumeMsg; +import org.zstack.header.volume.InstantiateVolumeMsg; +import org.zstack.header.volume.VolumeConstant; +import org.zstack.header.volume.VolumeDeletionPolicyManager; +import org.zstack.header.volume.VolumeInventory; +import org.zstack.header.volume.VolumeStatus; +import org.zstack.header.volume.VolumeType; +import org.zstack.header.volume.VolumeVO; +import org.zstack.header.volume.VolumeVO_; import org.zstack.kvm.KVMAgentCommands; import org.zstack.kvm.KVMGlobalConfig; import org.zstack.kvm.KVMHostInventory; import org.zstack.kvm.KVMStartVmExtensionPoint; +import org.zstack.kvm.VolumeTO; import org.zstack.resourceconfig.ResourceConfig; import org.zstack.resourceconfig.ResourceConfigFacade; import org.zstack.utils.Utils; import org.zstack.utils.logging.CLogger; +import javax.persistence.Tuple; +import java.util.Map; 
import java.util.Objects; +import static org.zstack.core.Platform.operr; import static org.zstack.kvm.KVMConstant.EDK_VERSION_NONE; -public class KvmSecureBootExtensions implements KVMStartVmExtensionPoint { +public class KvmSecureBootExtensions implements KVMStartVmExtensionPoint, + PreVmInstantiateResourceExtensionPoint { private static final CLogger logger = Utils.getLogger(KvmSecureBootExtensions.class); + @Autowired + private CloudBus bus; @Autowired private ResourceConfigFacade resourceConfigFacade; @Override public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAgentCommands.StartVmCmd cmd) { - if (!isUefiBootMode(cmd.getBootMode())) { - return; + if (isUefiBootMode(cmd.getBootMode())) { + ResourceConfig resourceConfig; + resourceConfig = resourceConfigFacade.getResourceConfig(VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT.getIdentity()); + cmd.setSecureBoot(resourceConfig.getResourceConfigValue(spec.getVmInventory().getUuid(), Boolean.class)); + + resourceConfig = resourceConfigFacade.getResourceConfig(KVMGlobalConfig.VM_EDK_VERSION_CONFIG.getIdentity()); + final String edkVersion = resourceConfig.getResourceConfigValue(spec.getVmInventory().getUuid(), String.class); + if (!Objects.equals(edkVersion, EDK_VERSION_NONE)) { + cmd.setEdkVersion(edkVersion); + } } - ResourceConfig resourceConfig; - resourceConfig = resourceConfigFacade.getResourceConfig(VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT.getIdentity()); - cmd.setSecureBoot(resourceConfig.getResourceConfigValue(spec.getVmInventory().getUuid(), Boolean.class)); + if (spec.getNvRamSpec() != null) { + prepareNvRamToStartVmCmd(cmd, spec.getNvRamSpec(), host); + } + } - resourceConfig = resourceConfigFacade.getResourceConfig(KVMGlobalConfig.VM_EDK_VERSION_CONFIG.getIdentity()); - final String edkVersion = resourceConfig.getResourceConfigValue(spec.getVmInventory().getUuid(), String.class); - if (!Objects.equals(edkVersion, EDK_VERSION_NONE)) { - cmd.setEdkVersion(edkVersion); + private void 
prepareNvRamToStartVmCmd(KVMAgentCommands.StartVmCmd cmd, DiskAO nvRamSpec, KVMHostInventory host) { + VolumeVO vo = Q.New(VolumeVO.class) + .eq(VolumeVO_.uuid, nvRamSpec.getSourceUuid()) + .find(); + if (vo == null) { + if (nvRamSpec.getSourceUuid() != null) { + throw new CloudRuntimeException(String.format("cannot find NvRam volume[uuid:%s]", nvRamSpec.getSourceUuid())); + } + return; } + + VolumeInventory nvRamVolume = VolumeInventory.valueOf(vo); + VolumeTO volume = VolumeTO.valueOfWithOutExtension(nvRamVolume, host, null); + cmd.setNvRam(volume); } @Override @@ -52,6 +106,217 @@ public void startVmOnKvmFailed(KVMHostInventory host, VmInstanceSpec spec, Error } private boolean isUefiBootMode(String bootMode) { - return bootMode.equals(ImageBootMode.UEFI.toString()) || bootMode.equals(ImageBootMode.UEFI_WITH_CSM.toString()); + return VmTpmManager.isUefiBootMode(bootMode); + } + + @Override + public void preBeforeInstantiateVmResource(VmInstanceSpec spec) throws VmInstantiateResourceException { + // do-nothing + } + + @Override + public void preInstantiateVmResource(VmInstanceSpec spec, Completion completion) { + final DiskAO nvRamSpec = spec.getNvRamSpec(); + boolean needRegisterNvRam = nvRamSpec != null; + + Tuple tuple = Q.New(VolumeVO.class) + .eq(VolumeVO_.vmInstanceUuid, spec.getVmInventory().getUuid()) + .eq(VolumeVO_.type, VolumeType.NvRam) + .select(VolumeVO_.uuid, VolumeVO_.status) + .findTuple(); + + String nvRamVolumeUuid = tuple == null ? 
null : tuple.get(0, String.class); + if (needRegisterNvRam && nvRamVolumeUuid != null) { + nvRamSpec.setSourceUuid(nvRamVolumeUuid); + + VolumeStatus volumeStatus = tuple.get(1, VolumeStatus.class); + if (volumeStatus != VolumeStatus.Ready) { + completion.fail(operr("NvRam volume[uuid:%s] is not ready", nvRamVolumeUuid)); + return; + } + + completion.success(); + return; + } else if (!needRegisterNvRam && nvRamVolumeUuid == null) { + completion.success(); + return; + } else if (needRegisterNvRam) { + nvRamSpec.setPrimaryStorageUuid(Q.New(VolumeVO.class) + .eq(VolumeVO_.type, VolumeType.Root) + .eq(VolumeVO_.vmInstanceUuid, spec.getVmInventory().getUuid()) + .select(VolumeVO_.primaryStorageUuid) + .findValue()); + + NvRamVolumeContext context = new NvRamVolumeContext(); + context.vmUuid = spec.getVmInventory().getUuid(); + context.nvRamSpec = nvRamSpec; + context.spec = spec; + createNvRamVolume(context, new Completion(completion) { + @Override + public void success() { + nvRamSpec.setSourceUuid(context.inventory.getUuid()); + completion.success(); + } + + @Override + public void fail(ErrorCode errorCode) { + completion.fail(errorCode); + } + }); + return; + } + + deleteNvRamVolumeIfExists(spec.getVmInventory().getUuid(), new Completion(completion) { + @Override + public void success() { + completion.success(); + } + + @Override + public void fail(ErrorCode errorCode) { + logger.warn("failed to delete NvRam but still continue: " + errorCode.getReadableDetails()); + completion.success(); + } + }); + } + + @Override + public void preReleaseVmResource(VmInstanceSpec spec, Completion completion) { + completion.success(); + } + + static class NvRamVolumeContext { + String vmUuid; + DiskAO nvRamSpec; + VmInstanceSpec spec; + + VolumeInventory inventory; + } + + @SuppressWarnings("rawtypes") + private void createNvRamVolume(NvRamVolumeContext context, Completion completion) { + SimpleFlowChain chain = new SimpleFlowChain(); + 
chain.setChainName("create-nv-ram-volume-for-vm-" + context.vmUuid); + chain.then(new Flow() { + String __name__ = "create-nv-ram-volume"; + + @Override + public void run(FlowTrigger trigger, Map data) { + String accountUuid = Q.New(AccountResourceRefVO.class) + .eq(AccountResourceRefVO_.resourceUuid, context.vmUuid) + .select(AccountResourceRefVO_.accountUuid) + .findValue(); + + CreateVolumeMsg msg = new CreateVolumeMsg(); + msg.setAccountUuid(accountUuid); + msg.setSize(context.nvRamSpec.getSize()); + msg.setVmInstanceUuid(context.vmUuid); + msg.setPrimaryStorageUuid(context.nvRamSpec.getPrimaryStorageUuid()); + + // NvRam file is raw type (*.fd) in libvirt 8.0.0 + // and qcow2 in libvirt 8.1.0+ (soon) + // We store it as file system (*.raw) with XFS format + msg.setFormat(VolumeConstant.VOLUME_FORMAT_RAW); + msg.setName(context.nvRamSpec.getName()); + msg.setVolumeType(VolumeType.NvRam.toString()); + + bus.makeLocalServiceId(msg, VolumeConstant.SERVICE_ID); + bus.send(msg, new CloudBusCallBack(completion) { + @Override + public void run(MessageReply reply) { + if (reply.isSuccess()) { + CreateVolumeReply castReply = reply.castReply(); + context.inventory = castReply.getInventory(); + trigger.next(); + return; + } + trigger.fail(operr("failed to create NvRam volume") + .withOpaque("vm.uuid", context.vmUuid) + .withCause(reply.getError())); + } + }); + } + + @Override + public void rollback(FlowRollback trigger, Map data) { + deleteNvRamVolumeIfExists(context.vmUuid, new Completion(trigger) { + @Override + public void success() { + trigger.rollback(); + } + + @Override + public void fail(ErrorCode errorCode) { + logger.warn("failed to delete NvRam but still continue: " + errorCode.getReadableDetails()); + trigger.rollback(); + } + }); + } + }).then(new NoRollbackFlow() { + String __name__ = "instantiate-nvram-volume"; + + @Override + public void run(FlowTrigger trigger, Map data) { + InstantiateVolumeMsg msg = new InstantiateVolumeMsg(); + 
msg.setHostUuid(context.spec.getDestHost().getUuid()); + msg.setPrimaryStorageUuid(context.nvRamSpec.getPrimaryStorageUuid()); + msg.setVolumeUuid(context.inventory.getUuid()); + + bus.makeTargetServiceIdByResourceUuid(msg, VolumeConstant.SERVICE_ID, msg.getVolumeUuid()); + bus.send(msg, new CloudBusCallBack(completion) { + @Override + public void run(MessageReply reply) { + if (reply.isSuccess()) { + trigger.next(); + return; + } + trigger.fail(operr("failed to instantiate NvRam volume") + .withOpaque("vm.uuid", context.vmUuid) + .withCause(reply.getError())); + } + }); + } + }).done(new FlowDoneHandler(completion) { + @Override + public void handle(Map data) { + completion.success(); + } + }).error(new FlowErrorHandler(completion) { + @Override + public void handle(ErrorCode errCode, Map data) { + completion.fail(errCode); + } + }).start(); + } + + private void deleteNvRamVolumeIfExists(String vmUuid, Completion completion) { + String volumeUuid = Q.New(VolumeVO.class) + .eq(VolumeVO_.vmInstanceUuid, vmUuid) + .eq(VolumeVO_.type, VolumeType.NvRam) + .select(VolumeVO_.uuid) + .findValue(); + if (volumeUuid == null) { + completion.success(); + return; + } + + DeleteVolumeMsg msg = new DeleteVolumeMsg(); + msg.setDetachBeforeDeleting(false); + msg.setUuid(volumeUuid); + msg.setDeletionPolicy(VolumeDeletionPolicyManager.VolumeDeletionPolicy.Direct.toString()); + bus.makeTargetServiceIdByResourceUuid(msg, VolumeConstant.SERVICE_ID, volumeUuid); + bus.send(msg, new CloudBusCallBack(completion) { + @Override + public void run(MessageReply reply) { + if (reply.isSuccess()) { + completion.success(); + return; + } + completion.fail(operr("failed to delete NvRam volume") + .withOpaque("vm.uuid", vmUuid) + .withOpaque("volume.uuid", volumeUuid) + .withCause(reply.getError())); + } + }); } } diff --git a/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java 
b/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java index e8d268e518a..9bba32e1d57 100755 --- a/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java +++ b/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java @@ -963,6 +963,10 @@ public String makeDataVolumeInstallUrl(String volUuid) { return PathUtil.join(self.getUrl(), PrimaryStoragePathMaker.makeDataVolumeInstallPath(volUuid)); } + public String makeNvRamVolumeInstallUrl(String volUuid) { + return PathUtil.join(self.getUrl(), PrimaryStoragePathMaker.makeNvRamVolumeInstallPath(volUuid)); + } + public boolean isCachedImageUrl(String path){ return path.startsWith(PathUtil.join(self.getUrl(), PrimaryStoragePathMaker.getCachedImageInstallDir())); } @@ -1288,6 +1292,8 @@ public void createEmptyVolumeWithBackingFile(final VolumeInventory volume, final cmd.setInstallUrl(makeMemoryVolumeInstallUrl(volume)); } else if (VolumeType.Cache.toString().equals(volume.getType())) { cmd.setInstallUrl(makeDataVolumeInstallUrl(volume.getUuid())); + } else if (VolumeType.NvRam.toString().equals(volume.getType())) { + cmd.setInstallUrl(makeNvRamVolumeInstallUrl(volume.getUuid())); } } cmd.setName(volume.getName()); diff --git a/storage/src/main/java/org/zstack/storage/primary/PrimaryStoragePathMaker.java b/storage/src/main/java/org/zstack/storage/primary/PrimaryStoragePathMaker.java index 7ef4b4dcd24..2458ee91d60 100755 --- a/storage/src/main/java/org/zstack/storage/primary/PrimaryStoragePathMaker.java +++ b/storage/src/main/java/org/zstack/storage/primary/PrimaryStoragePathMaker.java @@ -43,6 +43,10 @@ public static String makeDataVolumeInstallPath(String volUuid) { return PathUtil.join("dataVolumes", "acct-" + getAccountUuidOfResource(volUuid), "vol-" + volUuid, volUuid + ".qcow2"); } + public static String makeNvRamVolumeInstallPath(String volUuid) { + return PathUtil.join("nvRam", "acct-" + 
getAccountUuidOfResource(volUuid), "vol-" + volUuid, volUuid + ".raw"); + } + public static String makeImageFromSnapshotWorkspacePath(String imageUuid) { return PathUtil.join("snapshotWorkspace", String.format("image-%s", imageUuid)); } diff --git a/storage/src/main/java/org/zstack/storage/volume/VolumeBase.java b/storage/src/main/java/org/zstack/storage/volume/VolumeBase.java index 272d75d7a09..e0ef4a6141a 100755 --- a/storage/src/main/java/org/zstack/storage/volume/VolumeBase.java +++ b/storage/src/main/java/org/zstack/storage/volume/VolumeBase.java @@ -551,7 +551,8 @@ public void fail(ErrorCode errorCode) { } else if (msg instanceof InstantiateMemoryVolumeMsg) { instantiateMemoryVolume(msg, trigger); } else { - instantiateDataVolume(msg, trigger); + // include: data volume, NvRam, TpmState + instantiateOtherVolume(msg, trigger); } } } @@ -593,7 +594,7 @@ private void instantiateRootVolume(InstantiateRootVolumeMsg msg, FlowTrigger tri doInstantiateVolume(imsg, trigger); } - private void instantiateDataVolume(InstantiateVolumeMsg msg, FlowTrigger trigger) { + private void instantiateOtherVolume(InstantiateVolumeMsg msg, FlowTrigger trigger) { InstantiateVolumeOnPrimaryStorageMsg imsg = new InstantiateVolumeOnPrimaryStorageMsg(); prepareMsg(msg, imsg); doInstantiateVolume(imsg, trigger); diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index 462aa352dc8..4e57880a7ce 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -260,6 +260,7 @@ + From 6eb703b24318f42ae48128b1db56ffd57df8e852 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Fri, 13 Feb 2026 18:28:55 +0800 Subject: [PATCH 31/76] [localstorage]: support create empty volume in raw types Libvirt 8.0.0 only support raw type NVRAW (ext is '.fd'). 
Resolves: ZSV-11310 Related: ZSPHER-1 Change-Id: I6c65797078616c706d6c716c796b63737a716375 --- .../ceph/primary/CephPrimaryStorageBase.java | 13 +++++++++++- .../primary/local/LocalStorageKvmBackend.java | 21 +++++++++++++++++-- .../nfs/NfsPrimaryStorageKVMBackend.java | 5 ++++- .../NfsPrimaryStorageKVMBackendCommands.java | 8 +++++++ .../nfs/NfsPrimaryStorageKvmHelper.java | 4 ++++ .../storage/primary/smp/KvmBackend.java | 10 ++++++++- 6 files changed, 56 insertions(+), 5 deletions(-) diff --git a/plugin/ceph/src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java b/plugin/ceph/src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java index 9429bf63645..68e311dbcb1 100755 --- a/plugin/ceph/src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java +++ b/plugin/ceph/src/main/java/org/zstack/storage/ceph/primary/CephPrimaryStorageBase.java @@ -1729,7 +1729,18 @@ private void createEmptyVolume(final InstantiateVolumeOnPrimaryStorageMsg msg) { cmd.size = msg.getVolume().getSize(); cmd.setShareable(msg.getVolume().isShareable()); cmd.skipIfExisting = msg.isSkipIfExisting(); - cmd.format = msg.hasSystemTag(VolumeSystemTags.FORMAT_QCOW2.getTagFormat()) ? VolumeConstant.VOLUME_FORMAT_QCOW2 : VolumeConstant.VOLUME_FORMAT_RAW ; + + VolumeType type = Q.New(VolumeVO.class) + .eq(VolumeVO_.uuid, volumeUuid) + .select(VolumeVO_.type) + .findValue(); + if (type == VolumeType.NvRam) { + cmd.format = VolumeConstant.VOLUME_FORMAT_RAW; + } else { + cmd.format = msg.hasSystemTag(VolumeSystemTags.FORMAT_QCOW2.getTagFormat()) ? 
+ VolumeConstant.VOLUME_FORMAT_QCOW2 : + VolumeConstant.VOLUME_FORMAT_RAW ; + } final InstantiateVolumeOnPrimaryStorageReply reply = new InstantiateVolumeOnPrimaryStorageReply(); diff --git a/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java b/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java index 9bba32e1d57..9f3c1431589 100755 --- a/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java +++ b/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java @@ -33,6 +33,7 @@ import org.zstack.header.exception.CloudRuntimeException; import org.zstack.header.host.*; import org.zstack.header.image.ImageBackupStorageRefInventory; +import org.zstack.header.image.ImageConstant; import org.zstack.header.image.ImageConstant.ImageMediaType; import org.zstack.header.image.ImageInventory; import org.zstack.header.image.ImageStatus; @@ -202,6 +203,7 @@ public static class CreateEmptyVolumeCmd extends AgentCommand { private String name; private String volumeUuid; private String backingFile; + private String format; public String getBackingFile() { return backingFile; @@ -211,6 +213,14 @@ public void setBackingFile(String backingFile) { this.backingFile = backingFile; } + public String getFormat() { + return format; + } + + public void setFormat(String format) { + this.format = format; + } + public String getInstallUrl() { return installUrl; } @@ -1258,7 +1268,7 @@ public void success(VolumeStats returnValue) { VolumeInventory vol = msg.getVolume(); vol.setInstallPath(returnValue.getInstallPath()); vol.setActualSize(returnValue.getActualSize()); - vol.setFormat(VolumeConstant.VOLUME_FORMAT_QCOW2); + vol.setFormat(returnValue.getFormat()); if (returnValue.getSize() != null) { vol.setSize(returnValue.getSize()); } @@ -1283,7 +1293,11 @@ public void createEmptyVolumeWithBackingFile(final VolumeInventory volume, final 
cmd.setAccountUuid(acntMgr.getOwnerAccountUuidOfResource(volume.getUuid())); if (volume.getInstallPath() != null && !volume.getInstallPath().equals("")) { cmd.setInstallUrl(volume.getInstallPath()); + cmd.setFormat(VolumeType.NvRam.toString().equals(volume.getType()) + ? ImageConstant.RAW_FORMAT_STRING + : ImageConstant.QCOW2_FORMAT_STRING); } else { + cmd.setFormat(ImageConstant.QCOW2_FORMAT_STRING); if (VolumeType.Root.toString().equals(volume.getType())) { cmd.setInstallUrl(makeRootVolumeInstallUrl(volume)); } else if (VolumeType.Data.toString().equals(volume.getType())) { @@ -1294,6 +1308,7 @@ public void createEmptyVolumeWithBackingFile(final VolumeInventory volume, final cmd.setInstallUrl(makeDataVolumeInstallUrl(volume.getUuid())); } else if (VolumeType.NvRam.toString().equals(volume.getType())) { cmd.setInstallUrl(makeNvRamVolumeInstallUrl(volume.getUuid())); + cmd.setFormat(ImageConstant.RAW_FORMAT_STRING); } } cmd.setName(volume.getName()); @@ -1304,7 +1319,9 @@ public void createEmptyVolumeWithBackingFile(final VolumeInventory volume, final httpCall(CREATE_EMPTY_VOLUME_PATH, hostUuid, cmd, CreateEmptyVolumeRsp.class, new ReturnValueCompletion(completion) { @Override public void success(CreateEmptyVolumeRsp returnValue) { - completion.success(new VolumeStats(cmd.getInstallUrl(), returnValue.actualSize, returnValue.size)); + final VolumeStats stats = new VolumeStats(cmd.getInstallUrl(), returnValue.actualSize, returnValue.size); + stats.setFormat(cmd.getFormat()); + completion.success(stats); } @Override diff --git a/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java b/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java index 93d3d7aab99..b52fb127eda 100755 --- a/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java +++ 
b/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackend.java @@ -1107,6 +1107,9 @@ public void instantiateVolume(final PrimaryStorageInventory pinv, HostInventory cmd.setInstallUrl(NfsPrimaryStorageKvmHelper.makeDataVolumeInstallUrl(pinv, volume.getUuid())); } else if (volume.getType().equals(VolumeType.Cache.toString())) { cmd.setInstallUrl(NfsPrimaryStorageKvmHelper.makeDataVolumeInstallUrl(pinv, volume.getUuid())); + } else if (volume.getType().equals(VolumeType.NvRam.toString())) { + cmd.setInstallUrl(NfsPrimaryStorageKvmHelper.makeNvRamVolumeInstallUrl(pinv, volume.getUuid())); + cmd.setVolumeFormat(VolumeConstant.VOLUME_FORMAT_RAW); } else { throw new CloudRuntimeException(String.format("unknown volume type %s", volume.getType())); } @@ -1139,7 +1142,7 @@ public void run(MessageReply reply) { } volume.setInstallPath(cmd.getInstallUrl()); - volume.setFormat(VolumeConstant.VOLUME_FORMAT_QCOW2); + volume.setFormat(cmd.getVolumeFormat()); volume.setActualSize(rsp.actualSize); if (rsp.size != null) { volume.setSize(rsp.size); diff --git a/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackendCommands.java b/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackendCommands.java index d7cebfaa4c6..a105552fdd9 100755 --- a/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackendCommands.java +++ b/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKVMBackendCommands.java @@ -2,6 +2,7 @@ import org.zstack.header.HasThreadContext; import org.zstack.header.core.validation.Validation; +import org.zstack.header.volume.VolumeConstant; import org.zstack.kvm.KVMAgentCommands; import org.zstack.kvm.KVMAgentCommands.AgentCommand; import org.zstack.kvm.KVMAgentCommands.AgentResponse; @@ -319,6 +320,7 @@ public abstract static class CreateVolumeCmd extends 
NfsPrimaryStorageAgentComma private String hypervisorType; private String name; private String volumeUuid; + private String volumeFormat = VolumeConstant.VOLUME_FORMAT_QCOW2; protected long virtualSize; public String getInstallUrl() { @@ -351,6 +353,12 @@ public String getVolumeUuid() { public void setVolumeUuid(String uuid) { this.volumeUuid = uuid; } + public String getVolumeFormat() { + return volumeFormat; + } + public void setVolumeFormat(String volumeFormat) { + this.volumeFormat = volumeFormat; + } public long getVirtualSize() { return virtualSize; } diff --git a/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKvmHelper.java b/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKvmHelper.java index a58456a47db..5de2022ebc1 100755 --- a/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKvmHelper.java +++ b/plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorageKvmHelper.java @@ -44,6 +44,10 @@ public static String makeCachedImageInstallUrl(PrimaryStorageInventory pinv, Ima return ImageCacheUtil.getImageCachePath(iminv, it -> PathUtil.join(pinv.getMountPath(), PrimaryStoragePathMaker.makeCachedImageInstallPath(iminv))); } + public static String makeNvRamVolumeInstallUrl(PrimaryStorageInventory pinv, String volUuid) { + return PathUtil.join(pinv.getMountPath(), PrimaryStoragePathMaker.makeNvRamVolumeInstallPath(volUuid)); + } + public static String getCachedImageDir(PrimaryStorageInventory pinv){ return PathUtil.join(pinv.getMountPath(), PrimaryStoragePathMaker.getCachedImageInstallDir()); } diff --git a/plugin/sharedMountPointPrimaryStorage/src/main/java/org/zstack/storage/primary/smp/KvmBackend.java b/plugin/sharedMountPointPrimaryStorage/src/main/java/org/zstack/storage/primary/smp/KvmBackend.java index ee7faa6c40c..af5e39a76bb 100755 --- 
a/plugin/sharedMountPointPrimaryStorage/src/main/java/org/zstack/storage/primary/smp/KvmBackend.java +++ b/plugin/sharedMountPointPrimaryStorage/src/main/java/org/zstack/storage/primary/smp/KvmBackend.java @@ -321,6 +321,7 @@ public static class CreateEmptyVolumeCmd extends AgentCmd { public String name; public String volumeUuid; public String backingFile; + public String volumeFormat = VolumeConstant.VOLUME_FORMAT_QCOW2; } public static class CreateEmptyVolumeRsp extends AgentRsp { @@ -617,6 +618,10 @@ public String makeCachedImageInstallUrl(ImageInventory iminv) { return ImageCacheUtil.getImageCachePath(iminv, it -> PathUtil.join(self.getMountPath(), PrimaryStoragePathMaker.makeCachedImageInstallPath(iminv))); } + public String makeNvRamVolumeInstallUrl(String volUuid) { + return PathUtil.join(self.getMountPath(), PrimaryStoragePathMaker.makeNvRamVolumeInstallPath(volUuid)); + } + public String makeCachedImageInstallUrlFromImageUuidForTemplate(String imageUuid) { return PathUtil.join(self.getMountPath(), PrimaryStoragePathMaker.makeCachedImageInstallPathFromImageUuidForTemplate(imageUuid)); } @@ -969,6 +974,9 @@ private void createEmptyVolume(final VolumeInventory volume, String hostUuid, fi cmd.installPath = makeRootVolumeInstallUrl(volume); } else if (VolumeType.Data.toString().equals(volume.getType())) { cmd.installPath = makeDataVolumeInstallUrl(volume.getUuid()); + } else if (VolumeType.NvRam.toString().equals(volume.getType())) { + cmd.installPath = makeNvRamVolumeInstallUrl(volume.getUuid()); + cmd.volumeFormat = VolumeConstant.VOLUME_FORMAT_RAW; } else { DebugUtils.Assert(false, "Should not be here"); } @@ -982,7 +990,7 @@ public void success(AgentRsp returnValue) { CreateEmptyVolumeRsp rsp = (CreateEmptyVolumeRsp) returnValue; InstantiateVolumeOnPrimaryStorageReply reply = new InstantiateVolumeOnPrimaryStorageReply(); volume.setInstallPath(cmd.installPath); - volume.setFormat(VolumeConstant.VOLUME_FORMAT_QCOW2); + volume.setFormat(cmd.volumeFormat); 
volume.setActualSize(rsp.actualSize); if (rsp.size != null) { volume.setSize(rsp.size); From e765ffa736c485b8c19c7cdc0120a859f66ae5b6 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 26 Feb 2026 11:13:35 +0800 Subject: [PATCH 32/76] [localstorage]: remame CreateEmptyVolumeCmd.format to volumeFormat Related: ZSV-11310 Change-Id: I666370616e627a616378676f796971657662726f --- .../primary/local/LocalStorageKvmBackend.java | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java b/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java index 9f3c1431589..f45e9127817 100755 --- a/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java +++ b/plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java @@ -203,7 +203,7 @@ public static class CreateEmptyVolumeCmd extends AgentCommand { private String name; private String volumeUuid; private String backingFile; - private String format; + private String volumeFormat; public String getBackingFile() { return backingFile; @@ -213,12 +213,12 @@ public void setBackingFile(String backingFile) { this.backingFile = backingFile; } - public String getFormat() { - return format; + public String getVolumeFormat() { + return volumeFormat; } - public void setFormat(String format) { - this.format = format; + public void setVolumeFormat(String volumeFormat) { + this.volumeFormat = volumeFormat; } public String getInstallUrl() { @@ -1293,11 +1293,11 @@ public void createEmptyVolumeWithBackingFile(final VolumeInventory volume, final cmd.setAccountUuid(acntMgr.getOwnerAccountUuidOfResource(volume.getUuid())); if (volume.getInstallPath() != null && !volume.getInstallPath().equals("")) { cmd.setInstallUrl(volume.getInstallPath()); - cmd.setFormat(VolumeType.NvRam.toString().equals(volume.getType()) + 
cmd.setVolumeFormat(VolumeType.NvRam.toString().equals(volume.getType()) ? ImageConstant.RAW_FORMAT_STRING : ImageConstant.QCOW2_FORMAT_STRING); } else { - cmd.setFormat(ImageConstant.QCOW2_FORMAT_STRING); + cmd.setVolumeFormat(ImageConstant.QCOW2_FORMAT_STRING); if (VolumeType.Root.toString().equals(volume.getType())) { cmd.setInstallUrl(makeRootVolumeInstallUrl(volume)); } else if (VolumeType.Data.toString().equals(volume.getType())) { @@ -1308,7 +1308,7 @@ public void createEmptyVolumeWithBackingFile(final VolumeInventory volume, final cmd.setInstallUrl(makeDataVolumeInstallUrl(volume.getUuid())); } else if (VolumeType.NvRam.toString().equals(volume.getType())) { cmd.setInstallUrl(makeNvRamVolumeInstallUrl(volume.getUuid())); - cmd.setFormat(ImageConstant.RAW_FORMAT_STRING); + cmd.setVolumeFormat(ImageConstant.RAW_FORMAT_STRING); } } cmd.setName(volume.getName()); @@ -1320,7 +1320,7 @@ public void createEmptyVolumeWithBackingFile(final VolumeInventory volume, final @Override public void success(CreateEmptyVolumeRsp returnValue) { final VolumeStats stats = new VolumeStats(cmd.getInstallUrl(), returnValue.actualSize, returnValue.size); - stats.setFormat(cmd.getFormat()); + stats.setFormat(cmd.getVolumeFormat()); completion.success(stats); } From f05b9106ae7a0470d005d26f52f2ef35970dd3d3 Mon Sep 17 00:00:00 2001 From: "tao.yang" Date: Thu, 26 Feb 2026 21:40:29 +0800 Subject: [PATCH 33/76] [kms]: support kms trust API Resolves: ZSV-11331 Change-Id: I63646d7974756278777565696276797066796f68 --- sdk/src/main/java/SourceClassMap.java | 2 + .../java/org/zstack/sdk/CertificateInfo.java | 55 +++++++ .../java/org/zstack/sdk/KmsInventory.java | 19 ++- .../java/org/zstack/sdk/NkpRestoreInfo.java | 16 +-- .../keyprovider/kms/api/CreateKmsAction.java | 3 - .../api/GetKmsServerCertFromKmsAction.java | 101 +++++++++++++ .../api/GetKmsServerCertFromKmsResult.java | 30 ++++ .../kms/api/UploadKmsClientCsrAction.java | 107 ++++++++++++++ .../kms/api/UploadKmsClientCsrResult.java | 14 
++ .../api/UploadKmsClientIdentityAction.java | 110 ++++++++++++++ .../api/UploadKmsClientIdentityResult.java | 14 ++ .../api/UploadKmsClientSignedCertAction.java | 104 ++++++++++++++ .../api/UploadKmsClientSignedCertResult.java | 14 ++ .../kms/api/UploadKmsServerCertAction.java | 104 ++++++++++++++ .../kms/api/UploadKmsServerCertResult.java | 14 ++ .../keyprovider/nkp/api/CreateNkpAction.java | 3 - .../nkp/api/ParseNkpRestoreResult.java | 16 +++ .../java/org/zstack/testlib/ApiHelper.groovy | 135 ++++++++++++++++++ 18 files changed, 842 insertions(+), 19 deletions(-) create mode 100644 sdk/src/main/java/org/zstack/sdk/CertificateInfo.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/GetKmsServerCertFromKmsAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/GetKmsServerCertFromKmsResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientCsrAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientCsrResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientIdentityAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientIdentityResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientSignedCertAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientSignedCertResult.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsServerCertAction.java create mode 100644 sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsServerCertResult.java diff --git a/sdk/src/main/java/SourceClassMap.java b/sdk/src/main/java/SourceClassMap.java index 3829d9077ac..160dc8aad5e 100644 --- a/sdk/src/main/java/SourceClassMap.java +++ b/sdk/src/main/java/SourceClassMap.java @@ -178,6 +178,7 @@ public class SourceClassMap { 
put("org.zstack.header.image.APIGetUploadImageJobDetailsReply$JobDetails", "org.zstack.sdk.JobDetails"); put("org.zstack.header.image.ImageBackupStorageRefInventory", "org.zstack.sdk.ImageBackupStorageRefInventory"); put("org.zstack.header.image.ImageInventory", "org.zstack.sdk.ImageInventory"); + put("org.zstack.header.keyprovider.CertificateInfo", "org.zstack.sdk.CertificateInfo"); put("org.zstack.header.keyprovider.KeyProviderInventory", "org.zstack.sdk.KeyProviderInventory"); put("org.zstack.header.keyprovider.KmsIdentityInventory", "org.zstack.sdk.KmsIdentityInventory"); put("org.zstack.header.keyprovider.KmsInventory", "org.zstack.sdk.KmsInventory"); @@ -763,6 +764,7 @@ public class SourceClassMap { put("org.zstack.sdk.CephPrimaryStorageInventory", "org.zstack.storage.ceph.primary.CephPrimaryStorageInventory"); put("org.zstack.sdk.CephPrimaryStorageMonInventory", "org.zstack.storage.ceph.primary.CephPrimaryStorageMonInventory"); put("org.zstack.sdk.CephPrimaryStoragePoolInventory", "org.zstack.storage.ceph.primary.CephPrimaryStoragePoolInventory"); + put("org.zstack.sdk.CertificateInfo", "org.zstack.header.keyprovider.CertificateInfo"); put("org.zstack.sdk.CertificateInventory", "org.zstack.network.service.lb.CertificateInventory"); put("org.zstack.sdk.ChainInfo", "org.zstack.header.core.progress.ChainInfo"); put("org.zstack.sdk.ChronyServerInfo", "org.zstack.zops.ChronyServerInfo"); diff --git a/sdk/src/main/java/org/zstack/sdk/CertificateInfo.java b/sdk/src/main/java/org/zstack/sdk/CertificateInfo.java new file mode 100644 index 00000000000..5dc36854ed1 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/CertificateInfo.java @@ -0,0 +1,55 @@ +package org.zstack.sdk; + + + +public class CertificateInfo { + + public java.lang.String subject; + public void setSubject(java.lang.String subject) { + this.subject = subject; + } + public java.lang.String getSubject() { + return this.subject; + } + + public java.lang.String issuer; + public void 
setIssuer(java.lang.String issuer) { + this.issuer = issuer; + } + public java.lang.String getIssuer() { + return this.issuer; + } + + public java.lang.String commonName; + public void setCommonName(java.lang.String commonName) { + this.commonName = commonName; + } + public java.lang.String getCommonName() { + return this.commonName; + } + + public java.util.List subjectAltNamesDns; + public void setSubjectAltNamesDns(java.util.List subjectAltNamesDns) { + this.subjectAltNamesDns = subjectAltNamesDns; + } + public java.util.List getSubjectAltNamesDns() { + return this.subjectAltNamesDns; + } + + public java.util.List subjectAltNamesIp; + public void setSubjectAltNamesIp(java.util.List subjectAltNamesIp) { + this.subjectAltNamesIp = subjectAltNamesIp; + } + public java.util.List getSubjectAltNamesIp() { + return this.subjectAltNamesIp; + } + + public java.sql.Timestamp expiredDate; + public void setExpiredDate(java.sql.Timestamp expiredDate) { + this.expiredDate = expiredDate; + } + public java.sql.Timestamp getExpiredDate() { + return this.expiredDate; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/KmsInventory.java b/sdk/src/main/java/org/zstack/sdk/KmsInventory.java index fb544369699..65ee119c300 100644 --- a/sdk/src/main/java/org/zstack/sdk/KmsInventory.java +++ b/sdk/src/main/java/org/zstack/sdk/KmsInventory.java @@ -1,5 +1,6 @@ package org.zstack.sdk; +import org.zstack.sdk.CertificateInfo; import org.zstack.sdk.KmsIdentityInventory; public class KmsInventory extends org.zstack.sdk.KeyProviderInventory { @@ -52,12 +53,20 @@ public java.lang.String getActiveIdentityUuid() { return this.activeIdentityUuid; } - public java.sql.Timestamp serverCertExpiredDate; - public void setServerCertExpiredDate(java.sql.Timestamp serverCertExpiredDate) { - this.serverCertExpiredDate = serverCertExpiredDate; + public java.lang.String serverCertPem; + public void setServerCertPem(java.lang.String serverCertPem) { + this.serverCertPem = serverCertPem; } - public 
java.sql.Timestamp getServerCertExpiredDate() { - return this.serverCertExpiredDate; + public java.lang.String getServerCertPem() { + return this.serverCertPem; + } + + public CertificateInfo serverCertInfo; + public void setServerCertInfo(CertificateInfo serverCertInfo) { + this.serverCertInfo = serverCertInfo; + } + public CertificateInfo getServerCertInfo() { + return this.serverCertInfo; } public KmsIdentityInventory activeIdentity; diff --git a/sdk/src/main/java/org/zstack/sdk/NkpRestoreInfo.java b/sdk/src/main/java/org/zstack/sdk/NkpRestoreInfo.java index ae041ab435b..a9f108dab40 100644 --- a/sdk/src/main/java/org/zstack/sdk/NkpRestoreInfo.java +++ b/sdk/src/main/java/org/zstack/sdk/NkpRestoreInfo.java @@ -44,14 +44,6 @@ public java.lang.String getSaltPolicy() { return this.saltPolicy; } - public java.lang.String encryptedMasterSeed; - public void setEncryptedMasterSeed(java.lang.String encryptedMasterSeed) { - this.encryptedMasterSeed = encryptedMasterSeed; - } - public java.lang.String getEncryptedMasterSeed() { - return this.encryptedMasterSeed; - } - public java.lang.Integer currentVersion; public void setCurrentVersion(java.lang.Integer currentVersion) { this.currentVersion = currentVersion; @@ -60,4 +52,12 @@ public java.lang.Integer getCurrentVersion() { return this.currentVersion; } + public java.lang.Long backupTime; + public void setBackupTime(java.lang.Long backupTime) { + this.backupTime = backupTime; + } + public java.lang.Long getBackupTime() { + return this.backupTime; + } + } diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsAction.java index 827c6d5fa38..ba43ed60a34 100644 --- a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsAction.java +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/CreateKmsAction.java @@ -46,9 +46,6 @@ public Result throwExceptionIfError() { @Param(required = false, maxLength = 2048, nonempty = 
false, nullElements = false, emptyString = true, noTrim = false) public java.lang.String description; - @Param(required = false) - public java.lang.String type; - @Param(required = false) public java.lang.String resourceUuid; diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/GetKmsServerCertFromKmsAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/GetKmsServerCertFromKmsAction.java new file mode 100644 index 00000000000..405d905ffc5 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/GetKmsServerCertFromKmsAction.java @@ -0,0 +1,101 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class GetKmsServerCertFromKmsAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.kms.api.GetKmsServerCertFromKmsResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String uuid; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != 
null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.GetKmsServerCertFromKmsResult value = res.getResult(org.zstack.sdk.keyprovider.kms.api.GetKmsServerCertFromKmsResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.kms.api.GetKmsServerCertFromKmsResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/kms/{uuid}/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "getKmsServerCertFromKms"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/GetKmsServerCertFromKmsResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/GetKmsServerCertFromKmsResult.java new file mode 100644 index 00000000000..08a835fb0a5 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/GetKmsServerCertFromKmsResult.java @@ -0,0 +1,30 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import org.zstack.sdk.CertificateInfo; + +public class GetKmsServerCertFromKmsResult { + public java.lang.String serverCertPem; + public void setServerCertPem(java.lang.String serverCertPem) { + this.serverCertPem = serverCertPem; + } + public java.lang.String getServerCertPem() { + return this.serverCertPem; + } + + public boolean selfSigned; + public void setSelfSigned(boolean selfSigned) { + this.selfSigned = selfSigned; + } + public boolean getSelfSigned() { + return this.selfSigned; + } + + public 
CertificateInfo serverCertInfo; + public void setServerCertInfo(CertificateInfo serverCertInfo) { + this.serverCertInfo = serverCertInfo; + } + public CertificateInfo getServerCertInfo() { + return this.serverCertInfo; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientCsrAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientCsrAction.java new file mode 100644 index 00000000000..84f2ad9b579 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientCsrAction.java @@ -0,0 +1,107 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class UploadKmsClientCsrAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.kms.api.UploadKmsClientCsrResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String uuid; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String csrPem; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String csrKeyPem; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String 
accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.UploadKmsClientCsrResult value = res.getResult(org.zstack.sdk.keyprovider.kms.api.UploadKmsClientCsrResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.kms.api.UploadKmsClientCsrResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/kms/{uuid}/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "uploadKmsClientCsr"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientCsrResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientCsrResult.java new file mode 100644 index 00000000000..562abefd6f9 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientCsrResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import org.zstack.sdk.KmsIdentityInventory; + +public class UploadKmsClientCsrResult { + public KmsIdentityInventory inventory; + public void setInventory(KmsIdentityInventory inventory) { + this.inventory = inventory; + } + public KmsIdentityInventory getInventory() { + return 
this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientIdentityAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientIdentityAction.java new file mode 100644 index 00000000000..adc46da986f --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientIdentityAction.java @@ -0,0 +1,110 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class UploadKmsClientIdentityAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.kms.api.UploadKmsClientIdentityResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String uuid; + + @Param(required = true, validValues = {"PLATFORM","UPLOADED","CSR"}, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String identityType; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String kmsClientCertPem; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String kmsClientKeyPem; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + 
public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.UploadKmsClientIdentityResult value = res.getResult(org.zstack.sdk.keyprovider.kms.api.UploadKmsClientIdentityResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.kms.api.UploadKmsClientIdentityResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/kms/{uuid}/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "uploadKmsClientIdentity"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientIdentityResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientIdentityResult.java new file mode 100644 index 00000000000..e71e7a51c1e --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientIdentityResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import org.zstack.sdk.KmsIdentityInventory; + +public class UploadKmsClientIdentityResult { + public KmsIdentityInventory inventory; + public void setInventory(KmsIdentityInventory inventory) { + this.inventory = inventory; + } + 
public KmsIdentityInventory getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientSignedCertAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientSignedCertAction.java new file mode 100644 index 00000000000..eb2a7e94085 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientSignedCertAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class UploadKmsClientSignedCertAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.kms.api.UploadKmsClientSignedCertResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String uuid; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String signedClientCertPem; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if 
(res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.UploadKmsClientSignedCertResult value = res.getResult(org.zstack.sdk.keyprovider.kms.api.UploadKmsClientSignedCertResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.kms.api.UploadKmsClientSignedCertResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/kms/{uuid}/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "uploadKmsClientSignedCert"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientSignedCertResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientSignedCertResult.java new file mode 100644 index 00000000000..92ada658c38 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsClientSignedCertResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import org.zstack.sdk.KmsIdentityInventory; + +public class UploadKmsClientSignedCertResult { + public KmsIdentityInventory inventory; + public void setInventory(KmsIdentityInventory inventory) { + this.inventory = inventory; + } + public KmsIdentityInventory getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsServerCertAction.java 
b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsServerCertAction.java new file mode 100644 index 00000000000..023bd8d469e --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsServerCertAction.java @@ -0,0 +1,104 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import java.util.HashMap; +import java.util.Map; +import org.zstack.sdk.*; + +public class UploadKmsServerCertAction extends AbstractAction { + + private static final HashMap parameterMap = new HashMap<>(); + + private static final HashMap nonAPIParameterMap = new HashMap<>(); + + public static class Result { + public ErrorCode error; + public org.zstack.sdk.keyprovider.kms.api.UploadKmsServerCertResult value; + + public Result throwExceptionIfError() { + if (error != null) { + throw new ApiException( + String.format("error[code: %s, description: %s, details: %s]", error.code, error.description, error.details) + ); + } + + return this; + } + } + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String uuid; + + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + public java.lang.String serverCertPem; + + @Param(required = false) + public java.util.List systemTags; + + @Param(required = false) + public java.util.List userTags; + + @Param(required = false) + public String sessionId; + + @Param(required = false) + public String accessKeyId; + + @Param(required = false) + public String accessKeySecret; + + @Param(required = false) + public String requestIp; + + @NonAPIParam + public long timeout = -1; + + @NonAPIParam + public long pollingInterval = -1; + + + private Result makeResult(ApiResult res) { + Result ret = new Result(); + if (res.error != null) { + ret.error = res.error; + return ret; + } + + org.zstack.sdk.keyprovider.kms.api.UploadKmsServerCertResult value = 
res.getResult(org.zstack.sdk.keyprovider.kms.api.UploadKmsServerCertResult.class); + ret.value = value == null ? new org.zstack.sdk.keyprovider.kms.api.UploadKmsServerCertResult() : value; + + return ret; + } + + public Result call() { + ApiResult res = ZSClient.call(this); + return makeResult(res); + } + + public void call(final Completion completion) { + ZSClient.call(this, new InternalCompletion() { + @Override + public void complete(ApiResult res) { + completion.complete(makeResult(res)); + } + }); + } + + protected Map getParameterMap() { + return parameterMap; + } + + protected Map getNonAPIParameterMap() { + return nonAPIParameterMap; + } + + protected RestInfo getRestInfo() { + RestInfo info = new RestInfo(); + info.httpMethod = "PUT"; + info.path = "/key-providers/kms/{uuid}/actions"; + info.needSession = true; + info.needPoll = true; + info.parameterName = "uploadKmsServerCert"; + return info; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsServerCertResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsServerCertResult.java new file mode 100644 index 00000000000..7a0101c99c4 --- /dev/null +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/kms/api/UploadKmsServerCertResult.java @@ -0,0 +1,14 @@ +package org.zstack.sdk.keyprovider.kms.api; + +import org.zstack.sdk.KmsInventory; + +public class UploadKmsServerCertResult { + public KmsInventory inventory; + public void setInventory(KmsInventory inventory) { + this.inventory = inventory; + } + public KmsInventory getInventory() { + return this.inventory; + } + +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpAction.java index 45ca9d824c9..15a3642d5c0 100644 --- a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpAction.java +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/CreateNkpAction.java @@ -37,9 +37,6 @@ public Result 
throwExceptionIfError() { @Param(required = false, maxLength = 2048, nonempty = false, nullElements = false, emptyString = true, noTrim = false) public java.lang.String description; - @Param(required = false) - public java.lang.String type; - @Param(required = false) public java.lang.String resourceUuid; diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreResult.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreResult.java index 2fee861ac11..5b5ef838c90 100644 --- a/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreResult.java +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/nkp/api/ParseNkpRestoreResult.java @@ -11,4 +11,20 @@ public NkpRestoreInfo getRestoreInfo() { return this.restoreInfo; } + public java.lang.String code; + public void setCode(java.lang.String code) { + this.code = code; + } + public java.lang.String getCode() { + return this.code; + } + + public java.lang.String reason; + public void setReason(java.lang.String reason) { + this.reason = reason; + } + public java.lang.String getReason() { + return this.reason; + } + } diff --git a/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy b/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy index cfd879afb45..24fd79034bd 100644 --- a/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy +++ b/testlib/src/main/java/org/zstack/testlib/ApiHelper.groovy @@ -36608,6 +36608,33 @@ abstract class ApiHelper { } + def getKmsServerCertFromKms(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.GetKmsServerCertFromKmsAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.GetKmsServerCertFromKmsAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + 
def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + def queryKms(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.QueryKmsAction.class) Closure c) { def a = new org.zstack.sdk.keyprovider.kms.api.QueryKmsAction() a.sessionId = Test.currentEnvSpec?.session?.uuid @@ -36664,6 +36691,114 @@ abstract class ApiHelper { } + def uploadKmsClientCsr(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.UploadKmsClientCsrAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.UploadKmsClientCsrAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def uploadKmsClientIdentity(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.UploadKmsClientIdentityAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.UploadKmsClientIdentityAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def 
uploadKmsClientSignedCert(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.UploadKmsClientSignedCertAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.UploadKmsClientSignedCertAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + + def uploadKmsServerCert(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.kms.api.UploadKmsServerCertAction.class) Closure c) { + def a = new org.zstack.sdk.keyprovider.kms.api.UploadKmsServerCertAction() + a.sessionId = Test.currentEnvSpec?.session?.uuid + c.resolveStrategy = Closure.OWNER_FIRST + c.delegate = a + c() + + + if (System.getProperty("apipath") != null) { + if (a.apiId == null) { + a.apiId = Platform.uuid + } + + def tracker = new ApiPathTracker(a.apiId) + def out = errorOut(a.call()) + def path = tracker.getApiPath() + if (!path.isEmpty()) { + Test.apiPaths[a.class.name] = path.join(" --->\n") + } + + return out + } else { + return errorOut(a.call()) + } + } + + def backupNkp(@DelegatesTo(strategy = Closure.OWNER_FIRST, value = org.zstack.sdk.keyprovider.nkp.api.BackupNkpAction.class) Closure c) { def a = new org.zstack.sdk.keyprovider.nkp.api.BackupNkpAction() a.sessionId = Test.currentEnvSpec?.session?.uuid From 22dd42badf16b2fd5370c664b613c7e367f54f41 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Tue, 3 Mar 2026 14:34:39 +0800 Subject: [PATCH 34/76] [header]: introduce VmHostFileVO * introduces a management infrastructure for VM-host file (ex: NvRam and TPM state), encompassing a data model KVM 
agent communication protocol, persistent storage and integration with Secure Boot and TPM extensions. * New global configuration properties have been adde to control the enablement behavior of NvRam volumes. Resolves: ZSV-11310 Change-Id: I716c766875616b79736c69666d7863767276706c --- .../legacy/ComputeLegacyGlobalProperty.java | 7 + .../compute/vm/devices/VmTpmExtensions.java | 41 +- .../compute/vm/devices/VmTpmManager.java | 23 - conf/db/zsv/V5.0.0__schema.sql | 26 ++ conf/persistence.xml | 2 + conf/springConfigXml/Kvm.xml | 7 + .../vm/additions/VmHostFileContentFormat.java | 6 + .../vm/additions/VmHostFileContentVO.java | 93 ++++ .../vm/additions/VmHostFileContentVO_.java | 14 + .../header/vm/additions/VmHostFileType.java | 8 + .../header/vm/additions/VmHostFileVO.java | 107 +++++ .../header/vm/additions/VmHostFileVO_.java | 17 + .../java/org/zstack/kvm/KVMAgentCommands.java | 105 +++++ .../main/java/org/zstack/kvm/KVMConstant.java | 11 + .../src/main/java/org/zstack/kvm/KVMHost.java | 10 + .../kvm/efi/KvmSecureBootExtensions.java | 406 +++++++++++++++++- .../zstack/kvm/efi/KvmSecureBootManager.java | 104 +++++ .../org/zstack/kvm/tpm/KvmTpmExtensions.java | 97 ++++- .../main/java/org/zstack/kvm/tpm/TpmTO.java | 9 + .../test/resources/springConfigXml/Kvm.xml | 7 + .../org/zstack/testlib/KVMSimulator.groovy | 20 + .../org/zstack/utils/CollectionUtils.java | 2 + 22 files changed, 1083 insertions(+), 39 deletions(-) create mode 100644 header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentFormat.java create mode 100644 header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO.java create mode 100644 header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO_.java create mode 100644 header/src/main/java/org/zstack/header/vm/additions/VmHostFileType.java create mode 100644 header/src/main/java/org/zstack/header/vm/additions/VmHostFileVO.java create mode 100644 
header/src/main/java/org/zstack/header/vm/additions/VmHostFileVO_.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootManager.java diff --git a/compute/src/main/java/org/zstack/compute/legacy/ComputeLegacyGlobalProperty.java b/compute/src/main/java/org/zstack/compute/legacy/ComputeLegacyGlobalProperty.java index c0f1c80a38d..be845c85c2b 100644 --- a/compute/src/main/java/org/zstack/compute/legacy/ComputeLegacyGlobalProperty.java +++ b/compute/src/main/java/org/zstack/compute/legacy/ComputeLegacyGlobalProperty.java @@ -10,4 +10,11 @@ public class ComputeLegacyGlobalProperty { */ @GlobalProperty(name="legacyCpuTopologyFix", defaultValue = "false") public static boolean cpuTopologyFix; + /** + * if enableNvRamTypeVolume = true, NvRam type volume will be created if UEFI is enabled; + * if enableNvRamTypeVolume = false, NvRam type volume will not be created, NvRam & TpmState will save in host + * (not in Primary storage); + */ + @GlobalProperty(name="enable.nv.ram.type.volume", defaultValue = "false") + public static boolean enableNvRamTypeVolume; } diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java index 6877b2680fc..39ce6fcb840 100644 --- a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java @@ -2,19 +2,29 @@ import org.springframework.beans.factory.annotation.Autowired; import org.zstack.compute.vm.BuildVmSpecExtensionPoint; +import org.zstack.compute.vm.VmSystemTags; +import org.zstack.core.db.Q; +import org.zstack.header.tpm.entity.TpmSpec; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.tpm.entity.TpmVO_; import org.zstack.header.vm.CreateVmInstanceMsg; import org.zstack.header.vm.DiskAO; import org.zstack.header.vm.VmInstanceCreateExtensionPoint; import org.zstack.header.vm.VmInstanceSpec; import 
org.zstack.header.vm.VmInstanceVO; import org.zstack.header.vm.devices.VmDevicesSpec; +import org.zstack.resourceconfig.ResourceConfig; +import org.zstack.resourceconfig.ResourceConfigFacade; +import static org.zstack.compute.vm.VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT; import static org.zstack.header.vm.VmInstanceConstant.NV_RAM_DEFAULT_SIZE; public class VmTpmExtensions implements VmInstanceCreateExtensionPoint, BuildVmSpecExtensionPoint { @Autowired private VmTpmManager vmTpmManager; + @Autowired + private ResourceConfigFacade resourceConfigFacade; @Override public void preCreateVmInstance(CreateVmInstanceMsg msg) { @@ -34,13 +44,32 @@ public void afterPersistVmInstanceVO(VmInstanceVO vo, CreateVmInstanceMsg msg) { @Override public void afterBuildVmSpec(VmInstanceSpec spec) { String vmUuid = spec.getVmInventory().getUuid(); - if (!vmTpmManager.needRegisterNvRam(vmUuid)) { - return; + + boolean tpmExists = Q.New(TpmVO.class) + .eq(TpmVO_.vmInstanceUuid, vmUuid) + .isExists(); + boolean needRegisterNvRam = tpmExists; + if (!needRegisterNvRam) { + String bootMode = VmSystemTags.BOOT_MODE.getTokenByResourceUuid(vmUuid, VmSystemTags.BOOT_MODE_TOKEN); + if (vmTpmManager.isUefiBootMode(bootMode)) { + ResourceConfig resourceConfig = resourceConfigFacade.getResourceConfig(ENABLE_UEFI_SECURE_BOOT.getIdentity()); + needRegisterNvRam = resourceConfig.getResourceConfigValue(vmUuid, Boolean.class) == Boolean.TRUE; + } } - DiskAO nvRamSpec = new DiskAO(); - nvRamSpec.setSize(NV_RAM_DEFAULT_SIZE); - nvRamSpec.setName("NvRam-of-VM-" + vmUuid); - spec.setNvRamSpec(nvRamSpec); + if (needRegisterNvRam) { + DiskAO nvRamSpec = new DiskAO(); + nvRamSpec.setSize(NV_RAM_DEFAULT_SIZE); + nvRamSpec.setName("NvRam-of-VM-" + vmUuid); + spec.setNvRamSpec(nvRamSpec); + } + + if (tpmExists && (spec.getDevicesSpec() == null || spec.getDevicesSpec().getTpm() == null)) { + VmDevicesSpec devicesSpec = spec.getDevicesSpec() == null ? 
new VmDevicesSpec() : spec.getDevicesSpec(); + spec.setDevicesSpec(devicesSpec); + + devicesSpec.setTpm(new TpmSpec()); + devicesSpec.getTpm().setEnable(true); + } } } diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java index ced08da2918..19099f290a7 100644 --- a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java @@ -1,22 +1,16 @@ package org.zstack.compute.vm.devices; import org.springframework.beans.factory.annotation.Autowired; -import org.zstack.compute.vm.VmSystemTags; import org.zstack.core.Platform; import org.zstack.core.db.DatabaseFacade; -import org.zstack.core.db.Q; import org.zstack.header.image.ImageBootMode; import org.zstack.header.tpm.entity.TpmVO; -import org.zstack.header.tpm.entity.TpmVO_; -import org.zstack.resourceconfig.ResourceConfig; import org.zstack.resourceconfig.ResourceConfigFacade; import org.zstack.utils.Utils; import org.zstack.utils.logging.CLogger; import java.util.Objects; -import static org.zstack.compute.vm.VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT; - public class VmTpmManager { private static final CLogger logger = Utils.getLogger(VmTpmManager.class); @@ -39,23 +33,6 @@ public TpmVO persistTpmVO(String tpmUuid, String vmUuid) { return tpm; } - public boolean needRegisterNvRam(String vmUuid) { - boolean tpmExists = Q.New(TpmVO.class) - .eq(TpmVO_.vmInstanceUuid, vmUuid) - .isExists(); - if (tpmExists) { - return true; - } - - String bootMode = VmSystemTags.BOOT_MODE.getTokenByResourceUuid(vmUuid, VmSystemTags.BOOT_MODE_TOKEN); - if (!isUefiBootMode(bootMode)) { - return false; - } - - ResourceConfig resourceConfig = resourceConfigFacade.getResourceConfig(ENABLE_UEFI_SECURE_BOOT.getIdentity()); - return resourceConfig.getResourceConfigValue(vmUuid, Boolean.class) == Boolean.TRUE; - } - /** * @param bootMode boot mode, null is Legacy */ 
diff --git a/conf/db/zsv/V5.0.0__schema.sql b/conf/db/zsv/V5.0.0__schema.sql index 590c58ee528..73125957d48 100644 --- a/conf/db/zsv/V5.0.0__schema.sql +++ b/conf/db/zsv/V5.0.0__schema.sql @@ -21,6 +21,32 @@ CREATE TABLE IF NOT EXISTS `zstack`.`TpmHostRefVO` ( CONSTRAINT `fkTpmHostRefVOHostVO` FOREIGN KEY (`hostUuid`) REFERENCES `HostEO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +CREATE TABLE IF NOT EXISTS `zstack`.`VmHostFileVO` ( + `uuid` char(32) NOT NULL UNIQUE, + `vmInstanceUuid` char(32) NOT NULL, + `hostUuid` char(32) NOT NULL, + `type` varchar(64) NOT NULL COMMENT 'NvRam, TpmState, NvRamBackup, TpmStateBackup', + `path` varchar(1024) NOT NULL COMMENT 'Absolute path of the file on the host', + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`uuid`), + INDEX `idxVmHostFileVOVmInstanceUuid` (`vmInstanceUuid`), + INDEX `idxVmHostFileVOHostUuid` (`hostUuid`), + CONSTRAINT `fkVmHostFileVOVmInstanceVO` FOREIGN KEY (`vmInstanceUuid`) REFERENCES `VmInstanceEO` (`uuid`) ON DELETE CASCADE, + CONSTRAINT `fkVmHostFileVOHostVO` FOREIGN KEY (`hostUuid`) REFERENCES `HostEO` (`uuid`) ON DELETE CASCADE, + UNIQUE KEY `ukVmHostFileVO` (`vmInstanceUuid`, `hostUuid`, `type`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `zstack`.`VmHostFileContentVO` ( + `uuid` char(32) NOT NULL UNIQUE, + `content` MEDIUMBLOB DEFAULT '', + `format` varchar(64) NOT NULL COMMENT 'Raw, TarballGzip', + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`uuid`), + CONSTRAINT `fkVmHostFileContentVOVmHostFileVO` FOREIGN KEY (`uuid`) REFERENCES `VmHostFileVO` (`uuid`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + -- Feature: KMS | ZSPHER-46, ZSPHER-60, ZSPHER-61, ZSPHER-62 CREATE TABLE IF NOT 
EXISTS `zstack`.`KeyProviderVO` ( diff --git a/conf/persistence.xml b/conf/persistence.xml index 5a1b855d9e8..eb4d624e6af 100755 --- a/conf/persistence.xml +++ b/conf/persistence.xml @@ -20,6 +20,8 @@ org.zstack.header.managementnode.ManagementNodeContextVO org.zstack.header.tpm.entity.TpmHostRefVO org.zstack.header.tpm.entity.TpmVO + org.zstack.header.vm.additions.VmHostFileVO + org.zstack.header.vm.additions.VmHostFileContentVO org.zstack.header.zone.ZoneVO org.zstack.header.zone.ZoneEO org.zstack.header.cluster.ClusterVO diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index 16cd80fadf9..88886397a1f 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -255,6 +255,13 @@ + + + + + + + diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentFormat.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentFormat.java new file mode 100644 index 00000000000..1212dc054bc --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentFormat.java @@ -0,0 +1,6 @@ +package org.zstack.header.vm.additions; + +public enum VmHostFileContentFormat { + Raw, + TarballGzip, +} diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO.java new file mode 100644 index 00000000000..4995d5b0f38 --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO.java @@ -0,0 +1,93 @@ +package org.zstack.header.vm.additions; + +import org.zstack.header.vo.EntityGraph; +import org.zstack.header.vo.ForeignKey; +import org.zstack.header.vo.SoftDeletionCascade; +import org.zstack.header.vo.SoftDeletionCascades; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.Id; +import javax.persistence.Table; +import java.sql.Timestamp; + 
+/** + * Virtual Machine Host-side File Content Value Object + */ +@Entity +@Table +@SoftDeletionCascades({ + @SoftDeletionCascade(parent = VmHostFileVO.class, joinColumn = "uuid"), +}) +@EntityGraph( + friends = { + @EntityGraph.Neighbour(type = VmHostFileVO.class, myField = "uuid", targetField = "uuid"), + } +) +public class VmHostFileContentVO { + @Id + @Column + @ForeignKey(parentEntityClass = VmHostFileVO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + private String uuid; + @Column + private byte[] content; + @Column + @Enumerated(EnumType.STRING) + private VmHostFileContentFormat format; + @Column + private Timestamp createDate; + @Column + private Timestamp lastOpDate; + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public byte[] getContent() { + return content; + } + + public void setContent(byte[] content) { + this.content = content; + } + + public VmHostFileContentFormat getFormat() { + return format; + } + + public void setFormat(VmHostFileContentFormat format) { + this.format = format; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + @Override + public String toString() { + return "VmHostFileContentVO{" + + "uuid='" + uuid + '\'' + + ", format=" + format + + ", createDate=" + createDate + + ", lastOpDate=" + lastOpDate + + '}'; + } +} diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO_.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO_.java new file mode 100644 index 00000000000..c375650337b --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO_.java @@ -0,0 +1,14 @@ +package org.zstack.header.vm.additions; + 
+import javax.persistence.metamodel.SingularAttribute; +import javax.persistence.metamodel.StaticMetamodel; +import java.sql.Timestamp; + +@StaticMetamodel(VmHostFileContentVO.class) +public class VmHostFileContentVO_ { + public static volatile SingularAttribute uuid; + public static volatile SingularAttribute content; + public static volatile SingularAttribute format; + public static volatile SingularAttribute createDate; + public static volatile SingularAttribute lastOpDate; +} diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileType.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileType.java new file mode 100644 index 00000000000..16c493e6768 --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileType.java @@ -0,0 +1,8 @@ +package org.zstack.header.vm.additions; + +public enum VmHostFileType { + NvRam, + TpmState, + NvRamBackup, + TpmStateBackup, +} diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileVO.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileVO.java new file mode 100644 index 00000000000..c0691ad5dac --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileVO.java @@ -0,0 +1,107 @@ +package org.zstack.header.vm.additions; + +import org.zstack.header.host.HostEO; +import org.zstack.header.host.HostVO; +import org.zstack.header.vm.VmInstanceEO; +import org.zstack.header.vo.EntityGraph; +import org.zstack.header.vo.ForeignKey; +import org.zstack.header.vo.ResourceVO; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.Table; +import java.sql.Timestamp; + +/** + * Virtual Machine Host-side File Value Object + * + * Include: NvRam / TpmState files + */ +@Entity +@Table +@EntityGraph( + friends = { + @EntityGraph.Neighbour(type = VmInstanceEO.class, myField = "vmInstanceUuid", targetField = "uuid"), + 
@EntityGraph.Neighbour(type = HostVO.class, myField = "hostUuid", targetField = "uuid"), + } +) +public class VmHostFileVO extends ResourceVO { + @Column + @ForeignKey(parentEntityClass = VmInstanceEO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + private String vmInstanceUuid; + @Column + @ForeignKey(parentEntityClass = HostEO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + private String hostUuid; + @Column + @Enumerated(EnumType.STRING) + private VmHostFileType type; + @Column + private String path; + @Column + private Timestamp createDate; + @Column + private Timestamp lastOpDate; + + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public String getHostUuid() { + return hostUuid; + } + + public void setHostUuid(String hostUuid) { + this.hostUuid = hostUuid; + } + + public VmHostFileType getType() { + return type; + } + + public void setType(VmHostFileType type) { + this.type = type; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + @Override + public String toString() { + return "VmHostFileVO{" + + "uuid='" + uuid + '\'' + + ", vmInstanceUuid='" + vmInstanceUuid + '\'' + + ", hostUuid='" + hostUuid + '\'' + + ", type=" + type + + ", path='" + path + '\'' + + ", createDate=" + createDate + + ", lastOpDate=" + lastOpDate + + '}'; + } +} diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileVO_.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileVO_.java new file mode 100644 index 00000000000..39fdb742797 
--- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileVO_.java @@ -0,0 +1,17 @@ +package org.zstack.header.vm.additions; + +import org.zstack.header.vo.ResourceVO_; + +import javax.persistence.metamodel.SingularAttribute; +import javax.persistence.metamodel.StaticMetamodel; +import java.sql.Timestamp; + +@StaticMetamodel(VmHostFileVO.class) +public class VmHostFileVO_ extends ResourceVO_ { + public static volatile SingularAttribute vmInstanceUuid; + public static volatile SingularAttribute hostUuid; + public static volatile SingularAttribute type; + public static volatile SingularAttribute path; + public static volatile SingularAttribute createDate; + public static volatile SingularAttribute lastOpDate; +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index 16865309ec0..63a2f9759e7 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -11,6 +11,7 @@ import org.zstack.header.host.VmNicRedirectConfig; import org.zstack.header.log.NoLogging; import org.zstack.header.vm.*; +import org.zstack.header.vm.additions.VmHostFileContentFormat; import org.zstack.header.vm.devices.DeviceAddress; import org.zstack.header.vm.devices.VirtualDeviceInfo; import org.zstack.kvm.tpm.TpmTO; @@ -25,6 +26,7 @@ import static org.zstack.utils.CollectionDSL.e; import static org.zstack.utils.CollectionDSL.map; +import static org.zstack.utils.CollectionUtils.transform; import static org.zstack.utils.opaque.OpaqueConstants.OPAQUE_KEY_RESPONSE_ERROR; public class KVMAgentCommands { @@ -2727,6 +2729,63 @@ public void setVmCpuVendorId(String vmCpuVendorId) { public static class StartVmResponse extends VmDevicesInfoResponse { } + public static class VmHostFileTO { + private String path; + /** + * maybe "NvRam" or "TpmState" ... 
+ * @see org.zstack.header.vm.additions.VmHostFileType + */ + private String type; + /** + * maybe "Simple" or "TarballGzip" + * @see VmHostFileContentFormat + */ + private String fileFormat; + @NoLogging + private String contentBase64; + private String error; + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getFileFormat() { + return fileFormat; + } + + public void setFileFormat(String fileFormat) { + this.fileFormat = fileFormat; + } + + public String getContentBase64() { + return contentBase64; + } + + public void setContentBase64(String contentBase64) { + this.contentBase64 = contentBase64; + } + + public String getError() { + return error; + } + + public void setError(String error) { + this.error = error; + } + } + public static class VmDevicesInfoResponse extends AgentResponse { private List nicInfos; private List virtualDeviceInfoList; @@ -2800,6 +2859,52 @@ public void setVmInstanceUuid(String vmInstanceUuid) { public static class SyncVmDeviceInfoResponse extends VmDevicesInfoResponse { } + public static class ReadVmHostFileContentCmd extends AgentCommand { + /** + * without contentBase64, fileFormat + */ + private List hostFiles = new ArrayList<>(); + + public List getPaths() { + return transform(hostFiles, VmHostFileTO::getPath); + } + + public List getHostFiles() { + return hostFiles; + } + + public void setHostFiles(List hostFiles) { + this.hostFiles = hostFiles; + } + } + + public static class ReadVmHostFileContentResponse extends AgentResponse { + private List hostFiles = new ArrayList<>(); + + public List getHostFiles() { + return hostFiles; + } + + public void setHostFiles(List hostFiles) { + this.hostFiles = hostFiles; + } + } + + public static class WriteVmHostFileContentCmd extends AgentCommand { + private List hostFiles = new ArrayList<>(); + + public List 
getHostFiles() { + return hostFiles; + } + + public void setHostFiles(List hostFiles) { + this.hostFiles = hostFiles; + } + } + + public static class WriteVmHostFileContentResponse extends AgentResponse { + } + public static class VmNicInfo { private String macAddress; private DeviceAddress deviceAddress; diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java index cb79da59838..cae9533b7f2 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java @@ -85,6 +85,8 @@ public interface KVMConstant { String KVM_REGISTER_PRIMARY_VM_HEARTBEAT = "/register/primary/vm/heartbeat"; String CLEAN_FIRMWARE_FLASH = "/clean/firmware/flash"; String FSTRIM_VM_PATH = "/vm/fstrim"; + String READ_VM_HOST_FILE_PATH = "/vm/hostfile/read"; + String WRITE_VM_HOST_FILE_PATH = "/vm/hostfile/write"; String ISO_TO = "kvm.isoto"; String ANSIBLE_PLAYBOOK_NAME = "kvm.py"; @@ -184,6 +186,15 @@ public interface KVMConstant { public static final String L2_PROVIDER_TYPE_MACVLAN = "MacVlan"; public static final String EDK_VERSION_NONE = "None"; + public static final String NV_RAM_FILE_PATH_FORMAT = "/var/lib/libvirt/qemu/nvram/%s-host-files/%s.fd"; + public static String buildNvramFilePath(String vmUuid) { + return String.format(NV_RAM_FILE_PATH_FORMAT, vmUuid, vmUuid); + } + + public static final String TPM_STATE_FILE_PATH_FORMAT = "/var/lib/libvirt/swtpm/%s/"; + public static String buildTpmStateFilePath(String vmUuid) { + return String.format(TPM_STATE_FILE_PATH_FORMAT, vmUuid); + } public static final String DHCP_BIN_FILE_PATH = "/usr/local/zstack/dnsmasq"; diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 9d22cf50976..adcfc545808 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -230,6 +230,8 @@ 
public class KVMHost extends HostBase implements Host { private String fileDownloadPath; private String fileUploadPath; private String fileDownloadProgressPath; + private String readVmHostFilePath; + private String writeVmHostFilePath; public KVMHost(KVMHostVO self, KVMHostContext context) { super(self); @@ -480,6 +482,14 @@ public KVMHost(KVMHostVO self, KVMHostContext context) { ub = UriComponentsBuilder.fromHttpUrl(baseUrl); ub.path(KVMConstant.KVM_HOST_FILE_DOWNLOAD_PROGRESS_PATH); fileDownloadProgressPath = ub.build().toString(); + + ub = UriComponentsBuilder.fromHttpUrl(baseUrl); + ub.path(KVMConstant.READ_VM_HOST_FILE_PATH); + readVmHostFilePath = ub.build().toString(); + + ub = UriComponentsBuilder.fromHttpUrl(baseUrl); + ub.path(KVMConstant.WRITE_VM_HOST_FILE_PATH); + writeVmHostFilePath = ub.build().toString(); } static { diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java index 8f2e33af8ff..f9d8f923a76 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java @@ -1,13 +1,18 @@ package org.zstack.kvm.efi; import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.compute.legacy.ComputeLegacyGlobalProperty; import org.zstack.compute.vm.VmGlobalConfig; import org.zstack.compute.vm.devices.VmTpmManager; +import org.zstack.core.Platform; import org.zstack.core.cloudbus.CloudBus; import org.zstack.core.cloudbus.CloudBusCallBack; +import org.zstack.core.db.DatabaseFacade; import org.zstack.core.db.Q; +import org.zstack.core.db.SQL; import org.zstack.core.workflow.SimpleFlowChain; import org.zstack.header.core.Completion; +import org.zstack.header.core.ReturnValueCompletion; import org.zstack.header.core.workflow.Flow; import org.zstack.header.core.workflow.FlowDoneHandler; import 
org.zstack.header.core.workflow.FlowErrorHandler; @@ -23,6 +28,12 @@ import org.zstack.header.vm.PreVmInstantiateResourceExtensionPoint; import org.zstack.header.vm.VmInstanceSpec; import org.zstack.header.vm.VmInstantiateResourceException; +import org.zstack.header.vm.additions.VmHostFileContentFormat; +import org.zstack.header.vm.additions.VmHostFileContentVO; +import org.zstack.header.vm.additions.VmHostFileContentVO_; +import org.zstack.header.vm.additions.VmHostFileType; +import org.zstack.header.vm.additions.VmHostFileVO; +import org.zstack.header.vm.additions.VmHostFileVO_; import org.zstack.header.volume.CreateVolumeMsg; import org.zstack.header.volume.CreateVolumeReply; import org.zstack.header.volume.DeleteVolumeMsg; @@ -35,9 +46,12 @@ import org.zstack.header.volume.VolumeVO; import org.zstack.header.volume.VolumeVO_; import org.zstack.kvm.KVMAgentCommands; +import org.zstack.kvm.KVMAgentCommands.*; import org.zstack.kvm.KVMGlobalConfig; import org.zstack.kvm.KVMHostInventory; import org.zstack.kvm.KVMStartVmExtensionPoint; +import org.zstack.kvm.KvmCommandSender; +import org.zstack.kvm.KvmResponseWrapper; import org.zstack.kvm.VolumeTO; import org.zstack.resourceconfig.ResourceConfig; import org.zstack.resourceconfig.ResourceConfigFacade; @@ -45,11 +59,19 @@ import org.zstack.utils.logging.CLogger; import javax.persistence.Tuple; +import java.sql.Timestamp; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Objects; import static org.zstack.core.Platform.operr; -import static org.zstack.kvm.KVMConstant.EDK_VERSION_NONE; +import static org.zstack.kvm.KVMConstant.*; +import static org.zstack.utils.CollectionUtils.findOneOrNull; +import static org.zstack.utils.CollectionUtils.transform; public class KvmSecureBootExtensions implements KVMStartVmExtensionPoint, PreVmInstantiateResourceExtensionPoint { @@ -59,6 +81,10 @@ public class 
KvmSecureBootExtensions implements KVMStartVmExtensionPoint, private CloudBus bus; @Autowired private ResourceConfigFacade resourceConfigFacade; + @Autowired + private DatabaseFacade databaseFacade; + + private final Object hostFileLock = new Object(); @Override public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAgentCommands.StartVmCmd cmd) { @@ -80,19 +106,204 @@ public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAg } private void prepareNvRamToStartVmCmd(KVMAgentCommands.StartVmCmd cmd, DiskAO nvRamSpec, KVMHostInventory host) { - VolumeVO vo = Q.New(VolumeVO.class) - .eq(VolumeVO_.uuid, nvRamSpec.getSourceUuid()) - .find(); - if (vo == null) { - if (nvRamSpec.getSourceUuid() != null) { - throw new CloudRuntimeException(String.format("cannot find NvRam volume[uuid:%s]", nvRamSpec.getSourceUuid())); + if (ComputeLegacyGlobalProperty.enableNvRamTypeVolume) { + VolumeVO vo = Q.New(VolumeVO.class) + .eq(VolumeVO_.uuid, nvRamSpec.getSourceUuid()) + .find(); + if (vo == null) { + if (nvRamSpec.getSourceUuid() != null) { + throw new CloudRuntimeException(String.format("cannot find NvRam volume[uuid:%s]", nvRamSpec.getSourceUuid())); + } + return; } + + VolumeInventory nvRamVolume = VolumeInventory.valueOf(vo); + VolumeTO volume = VolumeTO.valueOfWithOutExtension(nvRamVolume, host, null); + cmd.setNvRam(volume); return; } - VolumeInventory nvRamVolume = VolumeInventory.valueOf(vo); - VolumeTO volume = VolumeTO.valueOfWithOutExtension(nvRamVolume, host, null); + VolumeTO volume = new VolumeTO(); + volume.setDeviceType(VolumeTO.FILE); + volume.setInstallPath(buildNvramFilePath(cmd.getVmInstanceUuid())); + volume.setVolumeUuid(null); // not a volume cmd.setNvRam(volume); + + synchronized (hostFileLock) { + VmHostFileVO nvRamFile = Q.New(VmHostFileVO.class) + .eq(VmHostFileVO_.vmInstanceUuid, cmd.getVmInstanceUuid()) + .eq(VmHostFileVO_.type, VmHostFileType.NvRam) + .eq(VmHostFileVO_.hostUuid, host.getUuid()) + .find(); 
+ if (nvRamFile == null) { + nvRamFile = new VmHostFileVO(); + nvRamFile.setUuid(Platform.getUuid()); + nvRamFile.setHostUuid(host.getUuid()); + nvRamFile.setVmInstanceUuid(cmd.getVmInstanceUuid()); + nvRamFile.setType(VmHostFileType.NvRam); + nvRamFile.setPath(volume.getInstallPath()); + nvRamFile.setCreateDate(Timestamp.from(Instant.now())); + nvRamFile.setResourceName("NvRam file for " + cmd.getVmInstanceUuid()); + databaseFacade.persist(nvRamFile); + } else { + SQL.New(VmHostFileVO.class) + .eq(VmHostFileVO_.uuid, nvRamFile.getUuid()) + .set(VmHostFileVO_.path, volume.getInstallPath()) + .set(VmHostFileVO_.lastOpDate, Timestamp.from(Instant.now())) + .update(); + } + } + } + + public static class SyncVmHostFilesFromHostContext { + public String hostUuid; + public String vmUuid; + + public String nvRamPath; + public String tpmStateFolder; + } + + public void syncVmHostFilesFromHost(SyncVmHostFilesFromHostContext context, Completion completion) { + KvmCommandSender sender = new KvmCommandSender(context.hostUuid); + + ReadVmHostFileContentCmd cmd = new ReadVmHostFileContentCmd(); + cmd.setHostFiles(new ArrayList<>()); + if (context.tpmStateFolder != null) { + VmHostFileTO to = new VmHostFileTO(); + to.setPath(context.tpmStateFolder); + to.setType(VmHostFileType.TpmState.toString()); + cmd.getHostFiles().add(to); + } + if (context.nvRamPath != null) { + VmHostFileTO to = new VmHostFileTO(); + to.setPath(context.nvRamPath); + to.setType(VmHostFileType.NvRam.toString()); + cmd.getHostFiles().add(to); + } + + sender.send(cmd, READ_VM_HOST_FILE_PATH, wrapper -> { + ReadVmHostFileContentResponse readRsp = wrapper.getResponse(ReadVmHostFileContentResponse.class); + return readRsp.isSuccess() ? 
null : + operr("failed to read file content response").withException(readRsp.getError()); + }, new ReturnValueCompletion(completion) { + @Override + public void success(KvmResponseWrapper wrapper) { + ReadVmHostFileContentResponse readRsp = wrapper.getResponse(ReadVmHostFileContentResponse.class); + if (!readRsp.isSuccess()) { + completion.fail(operr("failed to read file content response").withException(readRsp.getError())); + return; + } + + final List existsFiles = Q.New(VmHostFileVO.class) + .eq(VmHostFileVO_.vmInstanceUuid, context.vmUuid) + .eq(VmHostFileVO_.hostUuid, context.hostUuid) + .in(VmHostFileVO_.path, cmd.getPaths()) + .list(); + final List existsContentUuid; + if (!existsFiles.isEmpty()) { + existsContentUuid = Q.New(VmHostFileContentVO.class) + .in(VmHostFileContentVO_.uuid, transform(existsFiles, VmHostFileVO::getUuid)) + .select(VmHostFileContentVO_.uuid) + .listValues(); + } else { + existsContentUuid = Collections.emptyList(); + } + + for (String path : cmd.getPaths()) { + VmHostFileTO to = findOneOrNull(readRsp.getHostFiles(), item -> item.getPath().equals(path)); + if (to == null) { + continue; + } + if (to.getError() != null) { + logger.warn(String.format("failed to read file content from host[uuid=%s] with file %s: %s", + context.hostUuid, path, to.getError())); + continue; + } + + VmHostFileType type = Objects.equals(path, context.nvRamPath) ? 
+ VmHostFileType.NvRam : VmHostFileType.TpmState; + + VmHostFileVO file = findOneOrNull(existsFiles, item -> item.getPath().equals(path)); + boolean fileExists = file != null; + + Timestamp now = Timestamp.from(Instant.now()); + if (fileExists) { + SQL.New(VmHostFileVO.class) + .eq(VmHostFileVO_.uuid, file.getUuid()) + .set(VmHostFileVO_.lastOpDate, now) + .update(); + } else { + file = new VmHostFileVO(); + file.setUuid(Platform.getUuid()); + file.setHostUuid(context.hostUuid); + file.setVmInstanceUuid(context.vmUuid); + file.setPath(path); + file.setType(type); + file.setCreateDate(now); + file.setLastOpDate(now); + file.setResourceName(String.format("%s file for %s", type, context.vmUuid)); + databaseFacade.persist(file); + } + + byte[] bytes = Base64.getDecoder().decode(to.getContentBase64()); + if (existsContentUuid.contains(file.getUuid())) { + SQL.New(VmHostFileContentVO.class) + .eq(VmHostFileContentVO_.uuid, file.getUuid()) + .set(VmHostFileContentVO_.content, bytes) + .set(VmHostFileContentVO_.format, VmHostFileContentFormat.valueOf(to.getFileFormat())) + .set(VmHostFileContentVO_.lastOpDate, now) + .update(); + } else { + VmHostFileContentVO content = new VmHostFileContentVO(); + content.setUuid(file.getUuid()); + content.setContent(bytes); + content.setFormat(VmHostFileContentFormat.valueOf(to.getFileFormat())); + content.setCreateDate(now); + content.setLastOpDate(now); + databaseFacade.persist(content); + } + } + + completion.success(); + } + + @Override + public void fail(ErrorCode errorCode) { + completion.fail(errorCode); + } + }); + } + + public static class RewriteVmHostFilesContext { + public String hostUuid; + public List hostFiles; + } + + public void rewriteVmHostFiles(RewriteVmHostFilesContext context, Completion completion) { + KvmCommandSender sender = new KvmCommandSender(context.hostUuid); + KVMAgentCommands.WriteVmHostFileContentCmd cmd = new KVMAgentCommands.WriteVmHostFileContentCmd(); + cmd.setHostFiles(context.hostFiles); + + 
sender.send(cmd, WRITE_VM_HOST_FILE_PATH, wrapper -> { + KVMAgentCommands.WriteVmHostFileContentResponse writeRsp = wrapper.getResponse(KVMAgentCommands.WriteVmHostFileContentResponse.class); + return writeRsp.isSuccess() ? null : + operr("failed to write file content response").withException(writeRsp.getError()); + }, new ReturnValueCompletion(completion) { + @Override + public void success(KvmResponseWrapper wrapper) { + KVMAgentCommands.WriteVmHostFileContentResponse writeRsp = wrapper.getResponse(KVMAgentCommands.WriteVmHostFileContentResponse.class); + if (!writeRsp.isSuccess()) { + completion.fail(operr("failed to write file content response").withException(writeRsp.getError())); + return; + } + completion.success(); + } + + @Override + public void fail(ErrorCode errorCode) { + completion.fail(errorCode); + } + }); } @Override @@ -116,6 +327,14 @@ public void preBeforeInstantiateVmResource(VmInstanceSpec spec) throws VmInstant @Override public void preInstantiateVmResource(VmInstanceSpec spec, Completion completion) { + if (ComputeLegacyGlobalProperty.enableNvRamTypeVolume) { + prepareNvRamVolumeOnHost(spec, completion); + } else { + prepareNvRamHostFileOnHost(spec, completion); + } + } + + private void prepareNvRamVolumeOnHost(VmInstanceSpec spec, Completion completion) { final DiskAO nvRamSpec = spec.getNvRamSpec(); boolean needRegisterNvRam = nvRamSpec != null; @@ -180,6 +399,175 @@ public void fail(ErrorCode errorCode) { }); } + public static class PrepareHostFileContext { + public String hostUuid; + public String vmUuid; + public VmHostFileType type; + + // whether the NvRam is on the same host as before + private boolean sameHost = false; + private VmHostFileVO vmHostFile; + } + + @SuppressWarnings("rawtypes") + public void prepareHostFileOnHost(PrepareHostFileContext context, Completion completion) { + SimpleFlowChain chain = new SimpleFlowChain(); + chain.setName("prepare-vm-host-file"); + chain.then(new NoRollbackFlow() { + String __name__ = 
"read-vm-host-file-from-origin-host"; + + @Override + public void run(FlowTrigger trigger, Map data) { + VmHostFileVO vmHostFile = context.vmHostFile = Q.New(VmHostFileVO.class) + .eq(VmHostFileVO_.type, context.type) + .eq(VmHostFileVO_.vmInstanceUuid, context.vmUuid) + .orderByDesc(VmHostFileVO_.lastOpDate) + .limit(1) + .find(); + context.sameHost = vmHostFile != null && vmHostFile.getHostUuid().equals(context.hostUuid); + if (context.sameHost) { + logger.debug(String.format("skip to read/write %s host file for VM[vmUuid=%s]: vm.host is not changed", + context.type, context.vmUuid)); + trigger.next(); + return; + } + + if (vmHostFile == null) { + logger.debug(String.format("skip to read/write %s host file for VM[vmUuid=%s]: file is not registered in MN", + context.type, context.vmUuid)); + trigger.next(); + return; + } + + SyncVmHostFilesFromHostContext syncContext = new SyncVmHostFilesFromHostContext(); + syncContext.hostUuid = vmHostFile.getHostUuid(); + syncContext.vmUuid = context.vmUuid; + + if (vmHostFile.getType() == VmHostFileType.NvRam) { + syncContext.nvRamPath = vmHostFile.getPath(); + } else if (vmHostFile.getType() == VmHostFileType.TpmState) { + syncContext.tpmStateFolder = vmHostFile.getPath(); + } else { + throw new CloudRuntimeException("unsupported vm host file type: " + vmHostFile.getType()); + } + + syncVmHostFilesFromHost(syncContext, new Completion(trigger) { + @Override + public void success() { + trigger.next(); + } + + @Override + public void fail(ErrorCode errorCode) { + trigger.fail(errorCode); + } + }); + } + }).then(new NoRollbackFlow() { + String __name__ = "write-vm-host-file-to-dest-host"; + + @Override + public boolean skip(Map data) { + return context.sameHost || context.vmHostFile == null; + } + + @Override + public void run(FlowTrigger trigger, Map data) { + VmHostFileContentVO content = Q.New(VmHostFileContentVO.class) + .eq(VmHostFileContentVO_.uuid, context.vmHostFile.getUuid()) + .find(); + if (content == null) { + 
logger.debug(String.format("skip to write vm host file for VM[vmUuid=%s]: file content is not saved in MN", + context.vmUuid)); + trigger.next(); + return; + } + + VmHostFileTO to = new VmHostFileTO(); + to.setPath(context.vmHostFile.getPath()); + to.setType(context.vmHostFile.getType().toString()); + to.setFileFormat(content.getFormat().toString()); + + String contentBase64 = Base64.getEncoder().encodeToString(content.getContent()); + to.setContentBase64(contentBase64); + + RewriteVmHostFilesContext rewriteContext = new RewriteVmHostFilesContext(); + rewriteContext.hostUuid = context.hostUuid; + rewriteContext.hostFiles = Collections.singletonList(to); + + rewriteVmHostFiles(rewriteContext, new Completion(trigger) { + @Override + public void success() { + trigger.next(); + } + + @Override + public void fail(ErrorCode errorCode) { + trigger.fail(errorCode); + } + }); + } + }).then(new NoRollbackFlow() { + String __name__ = "re-read-vm-host-file-from-dest-host"; + + @Override + public boolean skip(Map data) { + // if context.sameHost is true, we also need to re-read the host file for cache. 
+ return context.vmHostFile == null; + } + + @Override + public void run(FlowTrigger trigger, Map data) { + KvmSecureBootExtensions.SyncVmHostFilesFromHostContext syncBackContext = + new KvmSecureBootExtensions.SyncVmHostFilesFromHostContext(); + syncBackContext.hostUuid = context.hostUuid; + syncBackContext.vmUuid = context.vmUuid; + + if (context.type == VmHostFileType.NvRam) { + syncBackContext.nvRamPath = context.vmHostFile.getPath(); + } else if (context.type == VmHostFileType.TpmState) { + syncBackContext.tpmStateFolder = context.vmHostFile.getPath(); + } + + syncVmHostFilesFromHost(syncBackContext, new Completion(trigger) { + @Override + public void success() { + trigger.next(); + } + + @Override + public void fail(ErrorCode errorCode) { + trigger.fail(errorCode); + } + }); + } + }).done(new FlowDoneHandler(completion) { + @Override + public void handle(Map data) { + completion.success(); + } + }).error(new FlowErrorHandler(completion) { + @Override + public void handle(ErrorCode errCode, Map data) { + completion.fail(errCode); + } + }).start(); + } + + private void prepareNvRamHostFileOnHost(VmInstanceSpec spec, Completion completion) { + final DiskAO nvRamSpec = spec.getNvRamSpec(); + if (nvRamSpec == null) { + completion.success(); + return; + } + + PrepareHostFileContext context = new PrepareHostFileContext(); + context.hostUuid = spec.getDestHost().getUuid(); + context.vmUuid = spec.getVmInventory().getUuid(); + context.type = VmHostFileType.NvRam; + prepareHostFileOnHost(context, completion); + } + @Override public void preReleaseVmResource(VmInstanceSpec spec, Completion completion) { completion.success(); diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootManager.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootManager.java new file mode 100644 index 00000000000..e6fc30333cc --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootManager.java @@ -0,0 +1,104 @@ +package org.zstack.kvm.efi; + 
+import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.compute.legacy.ComputeLegacyGlobalProperty; +import org.zstack.core.cloudbus.EventCallback; +import org.zstack.core.cloudbus.EventFacadeImpl; +import org.zstack.core.db.Q; +import org.zstack.header.Component; +import org.zstack.header.core.Completion; +import org.zstack.header.errorcode.ErrorCode; +import org.zstack.header.vm.VmCanonicalEvents; +import org.zstack.header.vm.VmInstanceVO; +import org.zstack.header.vm.VmInstanceVO_; +import org.zstack.header.vm.additions.VmHostFileType; +import org.zstack.header.vm.additions.VmHostFileVO; +import org.zstack.header.vm.additions.VmHostFileVO_; +import org.zstack.utils.Utils; +import org.zstack.utils.logging.CLogger; + +import javax.persistence.Tuple; +import java.util.List; +import java.util.Map; + +import static org.zstack.utils.CollectionDSL.list; +import static org.zstack.utils.CollectionUtils.findOneOrNull; + +public class KvmSecureBootManager implements Component { + private static final CLogger logger = Utils.getLogger(KvmSecureBootManager.class); + + @Autowired + private EventFacadeImpl eventFacade; + @Autowired + private KvmSecureBootExtensions secureBootExtensions; + + @Override + public boolean start() { + setupCanonicalEvents(); + return true; + } + + @Override + public boolean stop() { + return true; + } + + @SuppressWarnings("rawtypes") + private void setupCanonicalEvents() { + eventFacade.on(VmCanonicalEvents.VM_LIBVIRT_REPORT_SHUTDOWN, new EventCallback() { + @Override + protected void run(Map tokens, Object data) { + if (ComputeLegacyGlobalProperty.enableNvRamTypeVolume) { + return; + } + + String vmUuid = (String) data; + Tuple tuple = Q.New(VmInstanceVO.class) + .select(VmInstanceVO_.hostUuid, VmInstanceVO_.lastHostUuid) + .eq(VmInstanceVO_.uuid, vmUuid) + .findTuple(); + if (tuple == null) { + return; + } + + String hostUuid = (String) tuple.get(0); + if (hostUuid == null) { + hostUuid = (String) tuple.get(1); + } + + 
List hostFiles = Q.New(VmHostFileVO.class) + .eq(VmHostFileVO_.vmInstanceUuid, vmUuid) + .eq(VmHostFileVO_.hostUuid, hostUuid) + .in(VmHostFileVO_.type, list(VmHostFileType.NvRam, VmHostFileType.TpmState)) + .list(); + if (hostFiles.isEmpty()) { + return; + } + + VmHostFileVO nvRamFile = findOneOrNull(hostFiles, it -> it.getType() == VmHostFileType.NvRam); + VmHostFileVO tpmStateFile = findOneOrNull(hostFiles, it -> it.getType() == VmHostFileType.TpmState); + if (nvRamFile == null && tpmStateFile == null) { + return; + } + + KvmSecureBootExtensions.SyncVmHostFilesFromHostContext context = new KvmSecureBootExtensions.SyncVmHostFilesFromHostContext(); + context.hostUuid = hostUuid; + context.vmUuid = vmUuid; + context.nvRamPath = nvRamFile == null ? null : nvRamFile.getPath(); + context.tpmStateFolder = tpmStateFile == null ? null : tpmStateFile.getPath(); + secureBootExtensions.syncVmHostFilesFromHost(context, new Completion(null) { + @Override + public void success() { + logger.info(String.format("success to read file content from host[uuid=%s]", context.hostUuid)); + } + + @Override + public void fail(ErrorCode errorCode) { + logger.warn(String.format("failed to read file content from host[uuid=%s]: %s", + context.hostUuid, errorCode.getReadableDetails())); + } + }); + } + }); + } +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmExtensions.java index 7ff72c6affa..0d98a310d7a 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmExtensions.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmExtensions.java @@ -1,13 +1,43 @@ package org.zstack.kvm.tpm; +import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.core.Platform; +import org.zstack.core.db.DatabaseFacade; +import org.zstack.core.db.Q; +import org.zstack.core.db.SQL; +import org.zstack.header.core.Completion; import org.zstack.header.errorcode.ErrorCode; +import 
org.zstack.header.vm.PreVmInstantiateResourceExtensionPoint; import org.zstack.header.vm.VmInstanceSpec; +import org.zstack.header.vm.VmInstantiateResourceException; +import org.zstack.header.vm.additions.VmHostFileType; +import org.zstack.header.vm.additions.VmHostFileVO; +import org.zstack.header.vm.additions.VmHostFileVO_; import org.zstack.header.vm.devices.VmDevicesSpec; import org.zstack.kvm.KVMAgentCommands; import org.zstack.kvm.KVMHostInventory; import org.zstack.kvm.KVMStartVmExtensionPoint; +import org.zstack.kvm.efi.KvmSecureBootExtensions; +import org.zstack.kvm.efi.KvmSecureBootExtensions.*; +import org.zstack.utils.Utils; +import org.zstack.utils.logging.CLogger; + +import java.sql.Timestamp; +import java.time.Instant; + +import static org.zstack.kvm.KVMConstant.*; + +public class KvmTpmExtensions implements KVMStartVmExtensionPoint, + PreVmInstantiateResourceExtensionPoint { + private static final CLogger logger = Utils.getLogger(KvmTpmExtensions.class); + + @Autowired + private KvmSecureBootExtensions secureBootExtensions; + @Autowired + private DatabaseFacade databaseFacade; + + private final Object hostFileLock = new Object(); -public class KvmTpmExtensions implements KVMStartVmExtensionPoint { @Override public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAgentCommands.StartVmCmd cmd) { final VmDevicesSpec devicesSpec = spec.getDevicesSpec(); @@ -17,7 +47,33 @@ public void beforeStartVmOnKvm(KVMHostInventory host, VmInstanceSpec spec, KVMAg TpmTO tpm = new TpmTO(); tpm.setKeyProviderUuid(devicesSpec.getTpm().getKeyProviderUuid()); + tpm.setInstallPath(buildTpmStateFilePath(cmd.getVmInstanceUuid())); cmd.setTpm(tpm); + + synchronized (hostFileLock) { + VmHostFileVO tpmStateFile = Q.New(VmHostFileVO.class) + .eq(VmHostFileVO_.vmInstanceUuid, cmd.getVmInstanceUuid()) + .eq(VmHostFileVO_.type, VmHostFileType.TpmState) + .eq(VmHostFileVO_.hostUuid, host.getUuid()) + .find(); + if (tpmStateFile == null) { + tpmStateFile = new 
VmHostFileVO(); + tpmStateFile.setUuid(Platform.getUuid()); + tpmStateFile.setHostUuid(host.getUuid()); + tpmStateFile.setVmInstanceUuid(cmd.getVmInstanceUuid()); + tpmStateFile.setType(VmHostFileType.TpmState); + tpmStateFile.setPath(tpm.getInstallPath()); + tpmStateFile.setCreateDate(Timestamp.from(Instant.now())); + tpmStateFile.setResourceName("TpmState file for " + cmd.getVmInstanceUuid()); + databaseFacade.persist(tpmStateFile); + } else { + SQL.New(VmHostFileVO.class) + .eq(VmHostFileVO_.uuid, tpmStateFile.getUuid()) + .set(VmHostFileVO_.path, tpm.getInstallPath()) + .set(VmHostFileVO_.lastOpDate, Timestamp.from(Instant.now())) + .update(); + } + } } @Override @@ -29,4 +85,43 @@ public void startVmOnKvmSuccess(KVMHostInventory host, VmInstanceSpec spec) { public void startVmOnKvmFailed(KVMHostInventory host, VmInstanceSpec spec, ErrorCode err) { // do-nothing } + + @Override + public void preBeforeInstantiateVmResource(VmInstanceSpec spec) throws VmInstantiateResourceException { + // do-nothing + } + + @Override + public void preInstantiateVmResource(VmInstanceSpec spec, Completion completion) { + prepareTpmStateHostFileOnHost(spec, completion); + } + + static class PrepareTpmStateHostFileContext { + String hostUuid; + String vmUuid; + + // whether the NvRam is on the same host as before + boolean sameHost = false; + VmHostFileVO tpmStateFile; + } + + private void prepareTpmStateHostFileOnHost(VmInstanceSpec spec, Completion completion) { + final VmDevicesSpec devicesSpec = spec.getDevicesSpec(); + if (devicesSpec == null || devicesSpec.getTpm() == null || !devicesSpec.getTpm().isEnable()) { + completion.success(); + return; + } + + PrepareHostFileContext context = new PrepareHostFileContext(); + context.hostUuid = spec.getDestHost().getUuid(); + context.vmUuid = spec.getVmInventory().getUuid(); + context.type = VmHostFileType.TpmState; + secureBootExtensions.prepareHostFileOnHost(context, completion); + } + + + @Override + public void 
preReleaseVmResource(VmInstanceSpec spec, Completion completion) { + completion.success(); + } } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmTO.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmTO.java index d3210a3c2d7..c1de0d42c1b 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmTO.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmTO.java @@ -4,6 +4,7 @@ public class TpmTO implements Serializable { private String keyProviderUuid; + private String installPath; public String getKeyProviderUuid() { return keyProviderUuid; @@ -12,4 +13,12 @@ public String getKeyProviderUuid() { public void setKeyProviderUuid(String keyProviderUuid) { this.keyProviderUuid = keyProviderUuid; } + + public String getInstallPath() { + return installPath; + } + + public void setInstallPath(String installPath) { + this.installPath = installPath; + } } diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index 4e57880a7ce..53cf63d951b 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -254,6 +254,13 @@ + + + + + + + diff --git a/testlib/src/main/java/org/zstack/testlib/KVMSimulator.groovy b/testlib/src/main/java/org/zstack/testlib/KVMSimulator.groovy index f8794f26acd..f7e20a79f80 100755 --- a/testlib/src/main/java/org/zstack/testlib/KVMSimulator.groovy +++ b/testlib/src/main/java/org/zstack/testlib/KVMSimulator.groovy @@ -12,6 +12,7 @@ import org.zstack.header.storage.snapshot.TakeSnapshotsOnKvmResultStruct import org.zstack.header.vm.VmInstanceState import org.zstack.header.vm.VmInstanceVO import org.zstack.header.vm.VmInstanceVO_ +import org.zstack.header.vm.additions.VmHostFileContentFormat import org.zstack.header.vm.devices.DeviceAddress import org.zstack.header.vm.devices.VirtualDeviceInfo import org.zstack.header.volume.VolumeInventory @@ -681,5 +682,24 @@ class KVMSimulator implements Simulator { 
spec.simulator(KVMConstant.KVM_UPDATE_HOSTNAME_PATH) { return new UpdateHostnameRsp() } + + spec.simulator(KVMConstant.READ_VM_HOST_FILE_PATH) { HttpEntity e -> + def cmd = JSONObjectUtil.toObject(e.body, ReadVmHostFileContentCmd) + + def rsp = new ReadVmHostFileContentResponse() + for (final def param in cmd.hostFiles) { + def to = new VmHostFileTO() + to.path = param.path + to.type = param.type + to.fileFormat = VmHostFileContentFormat.Raw.toString() + to.contentBase64 = "dGVzdA==" + rsp.hostFiles.add(to) + } + return rsp + } + + spec.simulator(KVMConstant.WRITE_VM_HOST_FILE_PATH) { HttpEntity e -> + return new WriteVmHostFileContentResponse() + } } } diff --git a/utils/src/main/java/org/zstack/utils/CollectionUtils.java b/utils/src/main/java/org/zstack/utils/CollectionUtils.java index 8ddb29b647e..12a7ed72c61 100755 --- a/utils/src/main/java/org/zstack/utils/CollectionUtils.java +++ b/utils/src/main/java/org/zstack/utils/CollectionUtils.java @@ -4,6 +4,7 @@ import org.zstack.utils.function.Function; import org.zstack.utils.logging.CLogger; +import javax.annotation.Nullable; import java.lang.reflect.Method; import java.util.*; import java.util.concurrent.ConcurrentHashMap; @@ -56,6 +57,7 @@ public static List filter(Collection from, Predicate tester) { return from.stream().filter(tester).collect(Collectors.toList()); } + @Nullable public static T findOneOrNull(Collection from, Predicate tester) { return from.stream().filter(tester).findFirst().orElse(null); } From 7de9318d19b87edbfd552e31c9489242806c0d85 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 5 Mar 2026 17:49:21 +0800 Subject: [PATCH 35/76] [conf]: update VmHostFileContentVO.content default value Related: ZSV-11310 Change-Id: I646d6f65756f67686c7a766b7361796768677163 --- conf/db/zsv/V5.0.0__schema.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/db/zsv/V5.0.0__schema.sql b/conf/db/zsv/V5.0.0__schema.sql index 73125957d48..a01bae3a95f 100644 --- 
a/conf/db/zsv/V5.0.0__schema.sql +++ b/conf/db/zsv/V5.0.0__schema.sql @@ -39,7 +39,7 @@ CREATE TABLE IF NOT EXISTS `zstack`.`VmHostFileVO` ( CREATE TABLE IF NOT EXISTS `zstack`.`VmHostFileContentVO` ( `uuid` char(32) NOT NULL UNIQUE, - `content` MEDIUMBLOB DEFAULT '', + `content` MEDIUMBLOB DEFAULT NULL, `format` varchar(64) NOT NULL COMMENT 'Raw, TarballGzip', `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', From ff954c5d764db2049d475599ce65c4eca7c66aac Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Fri, 6 Mar 2026 10:15:37 +0800 Subject: [PATCH 36/76] [header]: merge TpmHostRefVO to VmHostFileVO * Removes database tables and corresponding entity classes related to TPM host references * Introducing a more generic VM-host inventory class as a replacement This is patch for feature "vTPM and Secure Boot" Resolves: ZSV-11310 Change-Id: I7a686a77686f63637974786a74656a776472686e --- conf/db/zsv/V5.0.0__schema.sql | 12 -- conf/persistence.xml | 1 - .../header/tpm/entity/TpmCapabilityView.java | 19 +-- .../entity/TpmCapabilityViewDoc_zh_cn.groovy | 10 +- .../tpm/entity/TpmHostRefInventory.java | 106 ----------------- .../header/tpm/entity/TpmHostRefVO.java | 106 ----------------- .../header/tpm/entity/TpmHostRefVO_.java | 15 --- .../header/tpm/entity/TpmInventory.java | 13 --- .../tpm/entity/TpmInventoryDoc_zh_cn.groovy | 5 +- .../org/zstack/header/tpm/entity/TpmVO.java | 19 --- .../header/vm/additions/PackageInfo.java | 7 ++ .../vm/additions/VmHostFileInventory.java | 108 ++++++++++++++++++ .../VmHostFileInventoryDoc_zh_cn.groovy} | 24 ++-- .../org/zstack/kvm/tpm/KvmTpmManager.java | 14 ++- sdk/src/main/java/SourceClassMap.java | 4 +- .../sdk/tpm/entity/TpmCapabilityView.java | 10 +- .../zstack/sdk/tpm/entity/TpmInventory.java | 8 -- .../entity/VmHostFileInventory.java} | 32 ++++-- 18 files changed, 188 insertions(+), 325 deletions(-) delete mode 100644 
header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventory.java delete mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO.java delete mode 100644 header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO_.java create mode 100644 header/src/main/java/org/zstack/header/vm/additions/PackageInfo.java create mode 100644 header/src/main/java/org/zstack/header/vm/additions/VmHostFileInventory.java rename header/src/main/java/org/zstack/header/{tpm/entity/TpmHostRefInventoryDoc_zh_cn.groovy => vm/additions/VmHostFileInventoryDoc_zh_cn.groovy} (51%) rename sdk/src/main/java/org/zstack/sdk/{tpm/entity/TpmHostRefInventory.java => vm/entity/VmHostFileInventory.java} (57%) diff --git a/conf/db/zsv/V5.0.0__schema.sql b/conf/db/zsv/V5.0.0__schema.sql index a01bae3a95f..f03734152a5 100644 --- a/conf/db/zsv/V5.0.0__schema.sql +++ b/conf/db/zsv/V5.0.0__schema.sql @@ -9,18 +9,6 @@ CREATE TABLE IF NOT EXISTS `zstack`.`TpmVO` ( CONSTRAINT `fkTpmVOVmInstanceVO` FOREIGN KEY (`vmInstanceUuid`) REFERENCES `VmInstanceEO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -CREATE TABLE IF NOT EXISTS `zstack`.`TpmHostRefVO` ( - `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT, - `tpmUuid` char(32) NOT NULL, - `hostUuid` char(32) NOT NULL, - `path` varchar(255) NOT NULL, - `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, - `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', - PRIMARY KEY (`id`), - CONSTRAINT `fkTpmHostRefVOTpmVO` FOREIGN KEY (`tpmUuid`) REFERENCES `TpmVO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE, - CONSTRAINT `fkTpmHostRefVOHostVO` FOREIGN KEY (`hostUuid`) REFERENCES `HostEO` (`uuid`) ON UPDATE RESTRICT ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - CREATE TABLE IF NOT EXISTS `zstack`.`VmHostFileVO` ( `uuid` char(32) NOT NULL UNIQUE, `vmInstanceUuid` char(32) NOT NULL, diff --git a/conf/persistence.xml b/conf/persistence.xml index 
eb4d624e6af..0fa065a6a71 100755 --- a/conf/persistence.xml +++ b/conf/persistence.xml @@ -18,7 +18,6 @@ org.zstack.resourceconfig.ResourceConfigVO org.zstack.header.managementnode.ManagementNodeVO org.zstack.header.managementnode.ManagementNodeContextVO - org.zstack.header.tpm.entity.TpmHostRefVO org.zstack.header.tpm.entity.TpmVO org.zstack.header.vm.additions.VmHostFileVO org.zstack.header.vm.additions.VmHostFileContentVO diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityView.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityView.java index 2b60316df52..3af24b8662c 100644 --- a/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityView.java +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityView.java @@ -1,11 +1,13 @@ package org.zstack.header.tpm.entity; import org.zstack.header.configuration.PythonClass; +import org.zstack.header.vm.additions.VmHostFileInventory; import java.sql.Timestamp; -import java.util.ArrayList; import java.util.List; +import static org.zstack.utils.CollectionDSL.list; + @PythonClass public class TpmCapabilityView { // fields in TpmInventory @@ -14,7 +16,10 @@ public class TpmCapabilityView { private String vmInstanceUuid; private Timestamp createDate; private Timestamp lastOpDate; - private List hostRefs; + /** + * collect VmHostFileInventory(VmHostFileVO) type=NvRam or type=TpmState + */ + private List fileRefs; // related table fields // TODO keyProviderUuid / keyProviderType / keyProviderName / keyProviderKeyVersion @@ -32,7 +37,6 @@ public void setTpmInventory(TpmInventory inventory) { setVmInstanceUuid(inventory.getVmInstanceUuid()); setCreateDate(inventory.getCreateDate()); setLastOpDate(inventory.getLastOpDate()); - setHostRefs(new ArrayList<>(inventory.getHostRefs())); } public String getUuid() { @@ -75,12 +79,12 @@ public void setLastOpDate(Timestamp lastOpDate) { this.lastOpDate = lastOpDate; } - public List getHostRefs() { - return hostRefs; + public List 
getFileRefs() { + return fileRefs; } - public void setHostRefs(List hostRefs) { - this.hostRefs = hostRefs; + public void setFileRefs(List fileRefs) { + this.fileRefs = fileRefs; } public String getEdkVersion() { @@ -110,6 +114,7 @@ public void setResetTpmAfterVmCloneConfig(boolean resetTpmAfterVmCloneConfig) { public static TpmCapabilityView __example__() { TpmCapabilityView view = new TpmCapabilityView(); view.setTpmInventory(TpmInventory.__example__()); + view.setFileRefs(list(VmHostFileInventory.__example__())); view.setEdkVersion("edk2-ovmf-20220126gitbb1bba3d77-3.el8.noarch"); view.setSwtpmVersion("0.8.2"); diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityViewDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityViewDoc_zh_cn.groovy index a286e9d74c7..4ee7ba90b8a 100644 --- a/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityViewDoc_zh_cn.groovy +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmCapabilityViewDoc_zh_cn.groovy @@ -1,7 +1,7 @@ package org.zstack.header.tpm.entity import java.sql.Timestamp -import org.zstack.header.tpm.entity.TpmHostRefInventory +import org.zstack.header.vm.additions.VmHostFileInventory doc { @@ -38,12 +38,12 @@ doc { since "5.0.0" } ref { - name "hostRefs" - path "org.zstack.header.tpm.entity.TpmCapabilityView.hostRefs" - desc "TPM 与主机的相关数据列表" + name "fileRefs" + path "org.zstack.header.tpm.entity.TpmCapabilityView.fileRefs" + desc "TPM 相关的主机侧文件或目录数据列表" type "List" since "5.0.0" - clz TpmHostRefInventory.class + clz VmHostFileInventory.class } field { name "edkVersion" diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventory.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventory.java deleted file mode 100644 index 3253723c0d0..00000000000 --- a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventory.java +++ /dev/null @@ -1,106 +0,0 @@ -package org.zstack.header.tpm.entity; - -import 
org.zstack.header.host.HostInventory; -import org.zstack.header.host.HostVO; -import org.zstack.header.message.DocUtils; -import org.zstack.header.query.ExpandedQueries; -import org.zstack.header.query.ExpandedQuery; -import org.zstack.header.search.Inventory; - -import java.sql.Timestamp; -import java.util.Collection; -import java.util.List; - -import static org.zstack.utils.CollectionUtils.transform; - -@Inventory(mappingVOClass = TpmHostRefVO.class) -@ExpandedQueries({ - @ExpandedQuery(expandedField = "tpm", inventoryClass = TpmInventory.class, - foreignKey = "tpmUuid", expandedInventoryKey = "uuid"), - @ExpandedQuery(expandedField = "host", inventoryClass = HostInventory.class, - foreignKey = "hostUuid", expandedInventoryKey = "uuid"), -}) -public class TpmHostRefInventory { - private long id; - private String tpmUuid; - private String hostUuid; - private String path; - private Timestamp createDate; - private Timestamp lastOpDate; - - public TpmHostRefInventory() { - } - - public static TpmHostRefInventory valueOf(TpmHostRefVO vo) { - TpmHostRefInventory inv = new TpmHostRefInventory(); - inv.setId(vo.getId()); - inv.setTpmUuid(vo.getTpmUuid()); - inv.setHostUuid(vo.getHostUuid()); - inv.setPath(vo.getPath()); - inv.setCreateDate(vo.getCreateDate()); - inv.setLastOpDate(vo.getLastOpDate()); - return inv; - } - - public static List valueOf(Collection vos) { - return transform(vos, TpmHostRefInventory::valueOf); - } - - public long getId() { - return id; - } - - public void setId(long id) { - this.id = id; - } - - public String getTpmUuid() { - return tpmUuid; - } - - public void setTpmUuid(String tpmUuid) { - this.tpmUuid = tpmUuid; - } - - public String getHostUuid() { - return hostUuid; - } - - public void setHostUuid(String hostUuid) { - this.hostUuid = hostUuid; - } - - public String getPath() { - return path; - } - - public void setPath(String path) { - this.path = path; - } - - public Timestamp getCreateDate() { - return createDate; - } - - public void 
setCreateDate(Timestamp createDate) { - this.createDate = createDate; - } - - public Timestamp getLastOpDate() { - return lastOpDate; - } - - public void setLastOpDate(Timestamp lastOpDate) { - this.lastOpDate = lastOpDate; - } - - public static TpmHostRefInventory __example__() { - TpmHostRefInventory ref = new TpmHostRefInventory(); - ref.setId(1L); - ref.setTpmUuid(DocUtils.createFixedUuid(TpmVO.class)); - ref.setHostUuid(DocUtils.createFixedUuid(HostVO.class)); - ref.setCreateDate(DocUtils.timestamp()); - ref.setLastOpDate(DocUtils.timestamp()); - return ref; - } -} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO.java deleted file mode 100644 index 38bdbf5049f..00000000000 --- a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO.java +++ /dev/null @@ -1,106 +0,0 @@ -package org.zstack.header.tpm.entity; - -import org.zstack.header.host.HostVO; -import org.zstack.header.vo.EntityGraph; -import org.zstack.header.vo.ForeignKey; -import org.zstack.header.vo.ToInventory; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.GeneratedValue; -import javax.persistence.GenerationType; -import javax.persistence.Id; -import javax.persistence.Table; -import java.sql.Timestamp; - -@Entity -@Table -@EntityGraph( - friends = { - @EntityGraph.Neighbour(type = TpmVO.class, myField = "tpmUuid", targetField = "uuid"), - @EntityGraph.Neighbour(type = HostVO.class, myField = "hostUuid", targetField = "uuid"), - } -) -public class TpmHostRefVO implements ToInventory { - @Id - @GeneratedValue(strategy = GenerationType.IDENTITY) - @Column - private long id; - - @Column - @ForeignKey(parentEntityClass = TpmVO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) - private String tpmUuid; - - @Column - @ForeignKey(parentEntityClass = HostVO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) - private String hostUuid; - - 
@Column - private String path; - - @Column - private Timestamp createDate; - - @Column - private Timestamp lastOpDate; - - public long getId() { - return id; - } - - public void setId(long id) { - this.id = id; - } - - public String getTpmUuid() { - return tpmUuid; - } - - public void setTpmUuid(String tpmUuid) { - this.tpmUuid = tpmUuid; - } - - public String getHostUuid() { - return hostUuid; - } - - public void setHostUuid(String hostUuid) { - this.hostUuid = hostUuid; - } - - public String getPath() { - return path; - } - - public void setPath(String path) { - this.path = path; - } - - public Timestamp getCreateDate() { - return createDate; - } - - public void setCreateDate(Timestamp createDate) { - this.createDate = createDate; - } - - public Timestamp getLastOpDate() { - return lastOpDate; - } - - public void setLastOpDate(Timestamp lastOpDate) { - this.lastOpDate = lastOpDate; - } - - @Override - public String toString() { - return "TpmHostRefVO{" + - "id=" + id + - ", tpmUuid='" + tpmUuid + '\'' + - ", hostUuid='" + hostUuid + '\'' + - ", path='" + path + '\'' + - ", createDate=" + createDate + - ", lastOpDate=" + lastOpDate + - '}'; - } -} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO_.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO_.java deleted file mode 100644 index ee4d9654711..00000000000 --- a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefVO_.java +++ /dev/null @@ -1,15 +0,0 @@ -package org.zstack.header.tpm.entity; - -import javax.persistence.metamodel.SingularAttribute; -import javax.persistence.metamodel.StaticMetamodel; -import java.sql.Timestamp; - -@StaticMetamodel(TpmHostRefVO.class) -public class TpmHostRefVO_ { - public static volatile SingularAttribute id; - public static volatile SingularAttribute tpmUuid; - public static volatile SingularAttribute hostUuid; - public static volatile SingularAttribute path; - public static volatile SingularAttribute createDate; - public 
static volatile SingularAttribute lastOpDate; -} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmInventory.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmInventory.java index 70ea376e643..eafd210a61d 100644 --- a/header/src/main/java/org/zstack/header/tpm/entity/TpmInventory.java +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmInventory.java @@ -10,11 +10,9 @@ import java.io.Serializable; import java.sql.Timestamp; -import java.util.ArrayList; import java.util.Collection; import java.util.List; -import static org.zstack.utils.CollectionDSL.list; import static org.zstack.utils.CollectionUtils.transform; @PythonClassInventory @@ -29,7 +27,6 @@ public class TpmInventory implements Serializable { private String vmInstanceUuid; private Timestamp createDate; private Timestamp lastOpDate; - private List hostRefs = new ArrayList<>(); public TpmInventory() { } @@ -41,7 +38,6 @@ public static TpmInventory valueOf(TpmVO vo) { inv.setVmInstanceUuid(vo.getVmInstanceUuid()); inv.setCreateDate(vo.getCreateDate()); inv.setLastOpDate(vo.getLastOpDate()); - inv.setHostRefs(TpmHostRefInventory.valueOf(vo.getHostRefs())); return inv; } @@ -89,14 +85,6 @@ public void setLastOpDate(Timestamp lastOpDate) { this.lastOpDate = lastOpDate; } - public List getHostRefs() { - return hostRefs; - } - - public void setHostRefs(List hostRefs) { - this.hostRefs = hostRefs; - } - public static TpmInventory __example__() { TpmInventory tpm = new TpmInventory(); tpm.setUuid(DocUtils.createFixedUuid(TpmVO.class)); @@ -104,7 +92,6 @@ public static TpmInventory __example__() { tpm.setName("TPM-for-VM-" + tpm.getVmInstanceUuid()); tpm.setCreateDate(DocUtils.timestamp()); tpm.setLastOpDate(DocUtils.timestamp()); - tpm.setHostRefs(list(TpmHostRefInventory.__example__())); return tpm; } } diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmInventoryDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/tpm/entity/TpmInventoryDoc_zh_cn.groovy index 
9908023d6c4..f67ac502f79 100644 --- a/header/src/main/java/org/zstack/header/tpm/entity/TpmInventoryDoc_zh_cn.groovy +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmInventoryDoc_zh_cn.groovy @@ -1,7 +1,6 @@ package org.zstack.header.tpm.entity -import java.sql.Timestamp -import org.zstack.header.tpm.entity.TpmHostRefInventory +import org.zstack.header.vm.additions.VmHostFileInventory doc { @@ -43,6 +42,6 @@ doc { desc "TPM 与主机的相关数据列表" type "List" since "5.0.0" - clz TpmHostRefInventory.class + clz VmHostFileInventory.class } } diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmVO.java b/header/src/main/java/org/zstack/header/tpm/entity/TpmVO.java index 02bbda43431..e5fafea5689 100644 --- a/header/src/main/java/org/zstack/header/tpm/entity/TpmVO.java +++ b/header/src/main/java/org/zstack/header/tpm/entity/TpmVO.java @@ -7,7 +7,6 @@ import org.zstack.header.vo.BaseResource; import org.zstack.header.vo.EntityGraph; import org.zstack.header.vo.ForeignKey; -import org.zstack.header.vo.NoView; import org.zstack.header.vo.ResourceVO; import org.zstack.header.vo.SoftDeletionCascade; import org.zstack.header.vo.SoftDeletionCascades; @@ -15,14 +14,9 @@ import javax.persistence.Column; import javax.persistence.Entity; -import javax.persistence.FetchType; -import javax.persistence.JoinColumn; -import javax.persistence.OneToMany; import javax.persistence.Table; import javax.persistence.Transient; import java.sql.Timestamp; -import java.util.HashSet; -import java.util.Set; @Entity @Table @@ -50,11 +44,6 @@ public class TpmVO extends ResourceVO implements ToInventory, OwnedByAccount { @Transient private String accountUuid; - @OneToMany(fetch = FetchType.EAGER) - @JoinColumn(name = "tpmUuid", insertable = false, updatable = false) - @NoView - private Set hostRefs = new HashSet<>(); - public String getVmInstanceUuid() { return vmInstanceUuid; } @@ -89,14 +78,6 @@ public void setAccountUuid(String accountUuid) { this.accountUuid = accountUuid; } - public 
Set getHostRefs() { - return hostRefs; - } - - public void setHostRefs(Set hostRefs) { - this.hostRefs = hostRefs; - } - public TpmVO() { } diff --git a/header/src/main/java/org/zstack/header/vm/additions/PackageInfo.java b/header/src/main/java/org/zstack/header/vm/additions/PackageInfo.java new file mode 100644 index 00000000000..dc9921af1f4 --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/PackageInfo.java @@ -0,0 +1,7 @@ +package org.zstack.header.vm.additions; + +import org.zstack.header.rest.SDKPackage; + +@SDKPackage(packageName="org.zstack.sdk.vm.entity") +public class PackageInfo { +} diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileInventory.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileInventory.java new file mode 100644 index 00000000000..fc5ecb9bb9f --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileInventory.java @@ -0,0 +1,108 @@ +package org.zstack.header.vm.additions; + +import org.zstack.header.host.HostVO; +import org.zstack.header.message.DocUtils; +import org.zstack.header.vm.VmInstanceVO; + +import java.sql.Timestamp; +import java.util.Collection; +import java.util.List; + +import static org.zstack.utils.CollectionUtils.transform; + +public class VmHostFileInventory { + private String uuid; + private String vmInstanceUuid; + private String hostUuid; + private String type; + private String path; + private Timestamp createDate; + private Timestamp lastOpDate; + + public VmHostFileInventory() { + } + + public static VmHostFileInventory valueOf(VmHostFileVO vo) { + VmHostFileInventory inv = new VmHostFileInventory(); + inv.setUuid(vo.getUuid()); + inv.setVmInstanceUuid(vo.getVmInstanceUuid()); + inv.setHostUuid(vo.getHostUuid()); + inv.setType(vo.getType().toString()); + inv.setPath(vo.getPath()); + inv.setCreateDate(vo.getCreateDate()); + inv.setLastOpDate(vo.getLastOpDate()); + return inv; + } + + public static List valueOf(Collection vos) { + 
return transform(vos, VmHostFileInventory::valueOf); + } + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; + } + + public String getHostUuid() { + return hostUuid; + } + + public void setHostUuid(String hostUuid) { + this.hostUuid = hostUuid; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + public static VmHostFileInventory __example__() { + VmHostFileInventory ref = new VmHostFileInventory(); + ref.setUuid(DocUtils.createFixedUuid(VmHostFileVO.class)); + ref.setVmInstanceUuid(DocUtils.createFixedUuid(VmInstanceVO.class)); + ref.setHostUuid(DocUtils.createFixedUuid(HostVO.class)); + ref.setType(VmHostFileType.TpmState.toString()); + ref.setPath("/var/lib/libvirt/swtpm/" + ref.getHostUuid() + "/"); + ref.setCreateDate(DocUtils.timestamp()); + ref.setLastOpDate(DocUtils.timestamp()); + return ref; + } +} diff --git a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventoryDoc_zh_cn.groovy b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileInventoryDoc_zh_cn.groovy similarity index 51% rename from header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventoryDoc_zh_cn.groovy rename to header/src/main/java/org/zstack/header/vm/additions/VmHostFileInventoryDoc_zh_cn.groovy index d8330cb7ff6..0ec8eb79265 100644 --- 
a/header/src/main/java/org/zstack/header/tpm/entity/TpmHostRefInventoryDoc_zh_cn.groovy +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileInventoryDoc_zh_cn.groovy @@ -1,20 +1,18 @@ -package org.zstack.header.tpm.entity - -import java.sql.Timestamp +package org.zstack.header.vm.additions doc { - title "TPM 与主机的相关数据" + title "虚拟机在主机侧的相关文件或目录数据" field { - name "id" - desc "自增主键" - type "long" + name "uuid" + desc "相关文件 UUID" + type "String" since "5.0.0" } field { - name "tpmUuid" - desc "TPM UUID" + name "vmInstanceUuid" + desc "虚拟机 UUID" type "String" since "5.0.0" } @@ -24,9 +22,15 @@ doc { type "String" since "5.0.0" } + field { + name "type" + desc "文件类型, 按用途分类, 可能是 NvRam 或者 TpmState" + type "String" + since "5.0.0" + } field { name "path" - desc "遗留 TPM 状态文件的位置" + desc "主机侧相关文件或目录的路径" type "String" since "5.0.0" } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java index 44b31131c87..4eb436a4b72 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java @@ -38,11 +38,16 @@ import org.zstack.header.tpm.message.RemoveTpmReply; import org.zstack.header.vm.VmInstanceVO; import org.zstack.header.vm.VmInstanceVO_; +import org.zstack.header.vm.additions.VmHostFileInventory; +import org.zstack.header.vm.additions.VmHostFileType; +import org.zstack.header.vm.additions.VmHostFileVO; +import org.zstack.header.vm.additions.VmHostFileVO_; import org.zstack.resourceconfig.ResourceConfig; import org.zstack.resourceconfig.ResourceConfigFacade; import org.zstack.utils.Utils; import org.zstack.utils.logging.CLogger; +import java.util.List; import java.util.Map; import static org.zstack.compute.vm.VmGlobalConfig.RESET_TPM_AFTER_VM_CLONE; @@ -54,6 +59,7 @@ import static org.zstack.kvm.KVMSystemTags.SWTPM_VERSION; import static org.zstack.kvm.KVMSystemTags.SWTPM_VERSION_TOKEN; import static 
org.zstack.kvm.KVMSystemTags.VM_EDK; +import static org.zstack.utils.CollectionDSL.list; public class KvmTpmManager extends AbstractService { private static final CLogger logger = Utils.getLogger(KvmTpmManager.class); @@ -312,8 +318,14 @@ private void handle(APIGetTpmCapabilityMsg msg) { final VmInstanceVO vm = Q.New(VmInstanceVO.class) .eq(VmInstanceVO_.uuid, tpm.getVmInstanceUuid()) .find(); - view.setTpmInventory(TpmInventory.valueOf(tpm)); + + List files = Q.New(VmHostFileVO.class) + .eq(VmHostFileVO_.vmInstanceUuid, vm.getUuid()) + .in(VmHostFileVO_.type, list(VmHostFileType.TpmState, VmHostFileType.NvRam)) + .list(); + view.setFileRefs(VmHostFileInventory.valueOf(files)); + view.setEdkVersion(VM_EDK.getTokenByResourceUuid(vm.getUuid(), EDK_RPM_TOKEN)); if (vm.getHostUuid() != null) { diff --git a/sdk/src/main/java/SourceClassMap.java b/sdk/src/main/java/SourceClassMap.java index 160dc8aad5e..0970b63b62f 100644 --- a/sdk/src/main/java/SourceClassMap.java +++ b/sdk/src/main/java/SourceClassMap.java @@ -277,7 +277,6 @@ public class SourceClassMap { put("org.zstack.header.tag.TagPatternType", "org.zstack.sdk.TagPatternType"); put("org.zstack.header.tag.UserTagInventory", "org.zstack.sdk.UserTagInventory"); put("org.zstack.header.tpm.entity.TpmCapabilityView", "org.zstack.sdk.tpm.entity.TpmCapabilityView"); - put("org.zstack.header.tpm.entity.TpmHostRefInventory", "org.zstack.sdk.tpm.entity.TpmHostRefInventory"); put("org.zstack.header.tpm.entity.TpmInventory", "org.zstack.sdk.tpm.entity.TpmInventory"); put("org.zstack.header.vdpa.VmVdpaNicInventory", "org.zstack.sdk.VmVdpaNicInventory"); put("org.zstack.header.vipQos.VipQosInventory", "org.zstack.sdk.VipQosInventory"); @@ -295,6 +294,7 @@ public class SourceClassMap { put("org.zstack.header.vm.VmPriorityConfigInventory", "org.zstack.sdk.VmPriorityConfigInventory"); put("org.zstack.header.vm.VmPriorityLevel", "org.zstack.sdk.VmPriorityLevel"); put("org.zstack.header.vm.VmSchedHistoryInventory", 
"org.zstack.sdk.VmSchedHistoryInventory"); + put("org.zstack.header.vm.additions.VmHostFileInventory", "org.zstack.sdk.vm.entity.VmHostFileInventory"); put("org.zstack.header.vm.cdrom.VmCdRomInventory", "org.zstack.sdk.VmCdRomInventory"); put("org.zstack.header.vm.devices.DeviceAddress", "org.zstack.sdk.DeviceAddress"); put("org.zstack.header.vm.devices.VmInstanceResourceMetadataArchiveInventory", "org.zstack.sdk.VmInstanceResourceMetadataArchiveInventory"); @@ -1286,8 +1286,8 @@ public class SourceClassMap { put("org.zstack.sdk.softwarePackage.header.JobDetails", "org.zstack.softwarePackage.header.JobDetails"); put("org.zstack.sdk.softwarePackage.header.SoftwarePackageInventory", "org.zstack.softwarePackage.header.SoftwarePackageInventory"); put("org.zstack.sdk.tpm.entity.TpmCapabilityView", "org.zstack.header.tpm.entity.TpmCapabilityView"); - put("org.zstack.sdk.tpm.entity.TpmHostRefInventory", "org.zstack.header.tpm.entity.TpmHostRefInventory"); put("org.zstack.sdk.tpm.entity.TpmInventory", "org.zstack.header.tpm.entity.TpmInventory"); + put("org.zstack.sdk.vm.entity.VmHostFileInventory", "org.zstack.header.vm.additions.VmHostFileInventory"); put("org.zstack.sdk.zbox.ZBoxBackupInventory", "org.zstack.externalbackup.zbox.ZBoxBackupInventory"); put("org.zstack.sdk.zbox.ZBoxBackupStorageBackupInfo", "org.zstack.externalbackup.zbox.ZBoxBackupStorageBackupInfo"); put("org.zstack.sdk.zbox.ZBoxInventory", "org.zstack.zbox.ZBoxInventory"); diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmCapabilityView.java b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmCapabilityView.java index f77138cdfc7..38fa336e3d5 100644 --- a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmCapabilityView.java +++ b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmCapabilityView.java @@ -44,12 +44,12 @@ public java.sql.Timestamp getLastOpDate() { return this.lastOpDate; } - public java.util.List hostRefs; - public void setHostRefs(java.util.List hostRefs) { - this.hostRefs = hostRefs; + 
public java.util.List fileRefs; + public void setFileRefs(java.util.List fileRefs) { + this.fileRefs = fileRefs; } - public java.util.List getHostRefs() { - return this.hostRefs; + public java.util.List getFileRefs() { + return this.fileRefs; } public java.lang.String edkVersion; diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmInventory.java b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmInventory.java index e4fa21a0746..538b12182ca 100644 --- a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmInventory.java +++ b/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmInventory.java @@ -44,12 +44,4 @@ public java.sql.Timestamp getLastOpDate() { return this.lastOpDate; } - public java.util.List hostRefs; - public void setHostRefs(java.util.List hostRefs) { - this.hostRefs = hostRefs; - } - public java.util.List getHostRefs() { - return this.hostRefs; - } - } diff --git a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmHostRefInventory.java b/sdk/src/main/java/org/zstack/sdk/vm/entity/VmHostFileInventory.java similarity index 57% rename from sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmHostRefInventory.java rename to sdk/src/main/java/org/zstack/sdk/vm/entity/VmHostFileInventory.java index 85db7ad9eb8..7c72bb8edb3 100644 --- a/sdk/src/main/java/org/zstack/sdk/tpm/entity/TpmHostRefInventory.java +++ b/sdk/src/main/java/org/zstack/sdk/vm/entity/VmHostFileInventory.java @@ -1,23 +1,23 @@ -package org.zstack.sdk.tpm.entity; +package org.zstack.sdk.vm.entity; -public class TpmHostRefInventory { +public class VmHostFileInventory { - public long id; - public void setId(long id) { - this.id = id; + public java.lang.String uuid; + public void setUuid(java.lang.String uuid) { + this.uuid = uuid; } - public long getId() { - return this.id; + public java.lang.String getUuid() { + return this.uuid; } - public java.lang.String tpmUuid; - public void setTpmUuid(java.lang.String tpmUuid) { - this.tpmUuid = tpmUuid; + public java.lang.String vmInstanceUuid; + public void 
setVmInstanceUuid(java.lang.String vmInstanceUuid) { + this.vmInstanceUuid = vmInstanceUuid; } - public java.lang.String getTpmUuid() { - return this.tpmUuid; + public java.lang.String getVmInstanceUuid() { + return this.vmInstanceUuid; } public java.lang.String hostUuid; @@ -28,6 +28,14 @@ public java.lang.String getHostUuid() { return this.hostUuid; } + public java.lang.String type; + public void setType(java.lang.String type) { + this.type = type; + } + public java.lang.String getType() { + return this.type; + } + public java.lang.String path; public void setPath(java.lang.String path) { this.path = path; From 2dfc2dc48dd8861bc60e6d959c4526bafab2c91a Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Tue, 10 Mar 2026 15:59:11 +0800 Subject: [PATCH 37/76] [kvm]: fix swtpm path error Modified the ``buildTpmStateFilePath`` method in ``KVMConstant.java`` to add normalization processing for the VM UUID. This converts it to a hyphenated format before using it in path construction. Resolves: ZSV-11463 Related: ZSV-11310 Change-Id: I786176646a6a756e727571696374716e6f6f6265 --- plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java index cae9533b7f2..575d54016c2 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java @@ -193,7 +193,8 @@ public static String buildNvramFilePath(String vmUuid) { public static final String TPM_STATE_FILE_PATH_FORMAT = "/var/lib/libvirt/swtpm/%s/"; public static String buildTpmStateFilePath(String vmUuid) { - return String.format(TPM_STATE_FILE_PATH_FORMAT, vmUuid); + String vmUuidWithHyphen = vmUuid.replaceFirst("(\\w{8})(\\w{4})(\\w{4})(\\w{4})(\\w{12})", "$1-$2-$3-$4-$5"); + return String.format(TPM_STATE_FILE_PATH_FORMAT, vmUuidWithHyphen); } public static final String 
DHCP_BIN_FILE_PATH = "/usr/local/zstack/dnsmasq"; From 32d4708e2a93c7293a168895b5a6037506a54d6c Mon Sep 17 00:00:00 2001 From: "tao.yang" Date: Fri, 6 Mar 2026 17:43:07 +0800 Subject: [PATCH 38/76] [sdk]: rekey API and root key synchronization Resolves: ZSV-11331 Change-Id: I66626a707a6465616d70616171656469796d6476 --- .../vm/devices/VmTpmRekeyAssociation.java | 35 +++++++++++++++++++ conf/springConfigXml/VmInstanceManager.xml | 20 +++++++---- ...roviderRekeyAssociationExtensionPoint.java | 11 ++++++ .../api/RekeyKeyProviderRefsAction.java | 11 +++++- 4 files changed, 69 insertions(+), 8 deletions(-) create mode 100644 compute/src/main/java/org/zstack/compute/vm/devices/VmTpmRekeyAssociation.java create mode 100644 header/src/main/java/org/zstack/header/keyprovider/KeyProviderRekeyAssociationExtensionPoint.java diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmRekeyAssociation.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmRekeyAssociation.java new file mode 100644 index 00000000000..ae469a0a9b4 --- /dev/null +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmRekeyAssociation.java @@ -0,0 +1,35 @@ +package org.zstack.compute.vm.devices; + +import org.zstack.core.db.Q; +import org.zstack.header.keyprovider.KeyProviderRekeyAssociationExtensionPoint; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.tpm.entity.TpmVO_; +import org.zstack.header.vm.VmInstanceVO; +import org.zstack.utils.CollectionUtils; + +import java.util.Collections; +import java.util.List; + +public class VmTpmRekeyAssociation implements KeyProviderRekeyAssociationExtensionPoint { + @Override + public String getType() { + return VmInstanceVO.class.getSimpleName(); + } + + @Override + public String getAssociatedResourceType() { + return TpmVO.class.getSimpleName(); + } + + @Override + public List getAssociatedResourceUuids(List resourceUuids) { + if (CollectionUtils.isEmpty(resourceUuids)) { + return Collections.emptyList(); + } + + 
return Q.New(TpmVO.class) + .in(TpmVO_.vmInstanceUuid, resourceUuids) + .select(TpmVO_.uuid) + .listValues(); + } +} diff --git a/conf/springConfigXml/VmInstanceManager.xml b/conf/springConfigXml/VmInstanceManager.xml index 0e82353d042..55af623b940 100755 --- a/conf/springConfigXml/VmInstanceManager.xml +++ b/conf/springConfigXml/VmInstanceManager.xml @@ -283,10 +283,16 @@ - - - - - - - + + + + + + + + + + + + + diff --git a/header/src/main/java/org/zstack/header/keyprovider/KeyProviderRekeyAssociationExtensionPoint.java b/header/src/main/java/org/zstack/header/keyprovider/KeyProviderRekeyAssociationExtensionPoint.java new file mode 100644 index 00000000000..e23f704b259 --- /dev/null +++ b/header/src/main/java/org/zstack/header/keyprovider/KeyProviderRekeyAssociationExtensionPoint.java @@ -0,0 +1,11 @@ +package org.zstack.header.keyprovider; + +import java.util.List; + +public interface KeyProviderRekeyAssociationExtensionPoint { + String getType(); + + String getAssociatedResourceType(); + + List getAssociatedResourceUuids(List resourceUuids); +} diff --git a/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsAction.java b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsAction.java index 2d005f5c3dc..ba08306ec15 100644 --- a/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsAction.java +++ b/sdk/src/main/java/org/zstack/sdk/keyprovider/api/RekeyKeyProviderRefsAction.java @@ -25,12 +25,21 @@ public Result throwExceptionIfError() { } } - @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) + @Param(required = false, nonempty = true, nullElements = false, emptyString = false, noTrim = false) public java.util.List refIds; + @Param(required = false, nonempty = true, nullElements = false, emptyString = false, noTrim = false) + public java.util.List resourceUuids; + + @Param(required = false, nonempty = false, nullElements = false, emptyString = false, noTrim = 
false) + public java.lang.String resourceType; + @Param(required = true, nonempty = false, nullElements = false, emptyString = false, noTrim = false) public java.lang.String providerUuid; + @Param(required = false, nonempty = false, nullElements = false, emptyString = true, noTrim = false) + public boolean rekeyAll = false; + @Param(required = false) public java.util.List systemTags; From 0e136804392a4810bbe2e96a87d9ec29909e1f97 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Wed, 11 Mar 2026 12:32:01 +0800 Subject: [PATCH 39/76] [kvm]: update condition write VM host file * Refactors the error handling mechanism for host file reads within the Secure Boot extension. * Introduces an error aggregation strategy that accumulates failed file reads into a list instead of immediately terminating the process; overall success or failure is determined only after all paths have been processed. * A new `firstReadSuccess` flag is added to track the initial read status, and the skip condition logic for multiple stream steps is adjusted. 
Resolves: ZSV-11444 Related: ZSV-11436 Related: ZSV-11310 Change-Id: I6f636d7364787872716a6b7a6a7a76736a697762 --- .../kvm/efi/KvmSecureBootExtensions.java | 34 +++++++++++-------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java index f9d8f923a76..83b8e8720c8 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java @@ -209,14 +209,16 @@ public void success(KvmResponseWrapper wrapper) { existsContentUuid = Collections.emptyList(); } + List errors = new ArrayList<>(); for (String path : cmd.getPaths()) { VmHostFileTO to = findOneOrNull(readRsp.getHostFiles(), item -> item.getPath().equals(path)); if (to == null) { continue; } if (to.getError() != null) { - logger.warn(String.format("failed to read file content from host[uuid=%s] with file %s: %s", - context.hostUuid, path, to.getError())); + errors.add(operr("failed to read file %s", path) + .withOpaque("path", path) + .withException(to.getError())); continue; } @@ -264,7 +266,11 @@ public void success(KvmResponseWrapper wrapper) { } } - completion.success(); + if (errors.isEmpty()) { + completion.success(); + } else { + completion.fail(operr("failed to read file content from host[uuid=%s]", context.hostUuid).withCause(errors)); + } } @Override @@ -406,6 +412,8 @@ public static class PrepareHostFileContext { // whether the NvRam is on the same host as before private boolean sameHost = false; + private boolean firstReadSuccess = false; + private boolean writeSuccess = false; private VmHostFileVO vmHostFile; } @@ -424,14 +432,6 @@ public void run(FlowTrigger trigger, Map data) { .orderByDesc(VmHostFileVO_.lastOpDate) .limit(1) .find(); - context.sameHost = vmHostFile != null && vmHostFile.getHostUuid().equals(context.hostUuid); - if (context.sameHost) { 
- logger.debug(String.format("skip to read/write %s host file for VM[vmUuid=%s]: vm.host is not changed", - context.type, context.vmUuid)); - trigger.next(); - return; - } - if (vmHostFile == null) { logger.debug(String.format("skip to read/write %s host file for VM[vmUuid=%s]: file is not registered in MN", context.type, context.vmUuid)); @@ -439,6 +439,7 @@ public void run(FlowTrigger trigger, Map data) { return; } + context.sameHost = vmHostFile.getHostUuid().equals(context.hostUuid); SyncVmHostFilesFromHostContext syncContext = new SyncVmHostFilesFromHostContext(); syncContext.hostUuid = vmHostFile.getHostUuid(); syncContext.vmUuid = context.vmUuid; @@ -454,12 +455,15 @@ public void run(FlowTrigger trigger, Map data) { syncVmHostFilesFromHost(syncContext, new Completion(trigger) { @Override public void success() { + context.firstReadSuccess = true; trigger.next(); } @Override public void fail(ErrorCode errorCode) { - trigger.fail(errorCode); + logger.warn(String.format("failed to read vm host file for VM[vmUuid=%s] but still continue: %s", + context.vmUuid, errorCode.getReadableDetails())); + trigger.next(); } }); } @@ -468,7 +472,7 @@ public void fail(ErrorCode errorCode) { @Override public boolean skip(Map data) { - return context.sameHost || context.vmHostFile == null; + return context.vmHostFile == null || (context.sameHost && context.firstReadSuccess); } @Override @@ -498,6 +502,7 @@ public void run(FlowTrigger trigger, Map data) { rewriteVmHostFiles(rewriteContext, new Completion(trigger) { @Override public void success() { + context.writeSuccess = true; trigger.next(); } @@ -512,8 +517,7 @@ public void fail(ErrorCode errorCode) { @Override public boolean skip(Map data) { - // if context.sameHost is true, we also need to re-read the host file for cache. 
- return context.vmHostFile == null; + return !context.writeSuccess; } @Override From 116a87cf4ccae119b113b1ad14b25349df0f7ebf Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 14:10:15 +0800 Subject: [PATCH 40/76] Envelope wrapper dek to compute --- conf/db/zsv/V5.0.0__schema.sql | 1 + ...retCreateExt \342\200\216ensionPoint.java" | 19 ++ .../secret/SecretGetExtensionPoint.java | 19 ++ .../header/secret/SecretHostDefineMsg.java | 30 ++ .../header/secret/SecretHostDefineReply.java | 28 ++ plugin/kvm/pom.xml | 7 +- .../zstack/kvm/HostSecretEnvelopeCrypto.java | 132 +++++++++ .../java/org/zstack/kvm/KVMAgentCommands.java | 84 ++++++ .../main/java/org/zstack/kvm/KVMConstant.java | 6 + .../src/main/java/org/zstack/kvm/KVMHost.java | 266 ++++++++++++++++++ .../kvm/host/HostSecretCase.groovy | 149 ++++++++++ 11 files changed, 740 insertions(+), 1 deletion(-) create mode 100644 "header/src/main/java/org/zstack/header/secret/SecretCreateExt \342\200\216ensionPoint.java" create mode 100644 header/src/main/java/org/zstack/header/secret/SecretGetExtensionPoint.java create mode 100644 header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java create mode 100644 header/src/main/java/org/zstack/header/secret/SecretHostDefineReply.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java create mode 100644 test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy diff --git a/conf/db/zsv/V5.0.0__schema.sql b/conf/db/zsv/V5.0.0__schema.sql index f03734152a5..ac3b849ec1e 100644 --- a/conf/db/zsv/V5.0.0__schema.sql +++ b/conf/db/zsv/V5.0.0__schema.sql @@ -93,6 +93,7 @@ CREATE TABLE IF NOT EXISTS `zstack`.`HostKeyIdentityVO` ( `hostUuid` varchar(32) NOT NULL UNIQUE, `publicKey` text NOT NULL, `fingerprint` varchar(128) NOT NULL, + `verified` boolean NOT NULL DEFAULT FALSE, `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `createDate` timestamp NOT NULL DEFAULT 
'1999-12-31 23:59:59', PRIMARY KEY (`hostUuid`), diff --git "a/header/src/main/java/org/zstack/header/secret/SecretCreateExt \342\200\216ensionPoint.java" "b/header/src/main/java/org/zstack/header/secret/SecretCreateExt \342\200\216ensionPoint.java" new file mode 100644 index 00000000000..7c8ab192ecd --- /dev/null +++ "b/header/src/main/java/org/zstack/header/secret/SecretCreateExt \342\200\216ensionPoint.java" @@ -0,0 +1,19 @@ + +package org.zstack.header.secret; + +import org.zstack.header.core.ReturnValueCompletion; + +/** + * Extension point for creating a secret in key-manager (e.g. premium with NKP/KMS). + * Used for VM (e.g. vTPM at VM create). Premium implements with key-manager create; + * success returns secretId/name for later get. + */ +public interface SecretCreateExtensionPoint { + /** + * Create a secret. Implementation (e.g. premium) calls key-manager create. + * + * @param secretName name or identifier for the secret + * @param completion success(secretIdOrName) for later get, or fail(error) + */ + void createSecret(String secretName, ReturnValueCompletion completion); +} diff --git a/header/src/main/java/org/zstack/header/secret/SecretGetExtensionPoint.java b/header/src/main/java/org/zstack/header/secret/SecretGetExtensionPoint.java new file mode 100644 index 00000000000..78b28283ab9 --- /dev/null +++ b/header/src/main/java/org/zstack/header/secret/SecretGetExtensionPoint.java @@ -0,0 +1,19 @@ + +package org.zstack.header.secret; + +import org.zstack.header.core.ReturnValueCompletion; + +/** + * Extension point for getting plaintext DEK from key-manager (e.g. premium with NKP/KMS). + * Used for VM (e.g. to send DEK to host via SecretHostDefineMsg). Premium implements with + * key-manager get; success returns dekBase64 (plaintext DEK, base64). + */ +public interface SecretGetExtensionPoint { + /** + * Get plaintext DEK. Implementation (e.g. premium) calls key-manager get. 
+ * + * @param secretNameOrId secret name or id (from create or stored) + * @param completion success(dekBase64) with plaintext DEK in base64, or fail(error) + */ + void getSecret(String secretNameOrId, ReturnValueCompletion completion); +} diff --git a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java new file mode 100644 index 00000000000..1d868bd085e --- /dev/null +++ b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java @@ -0,0 +1,30 @@ +package org.zstack.header.secret; + +import org.zstack.header.host.HostMessage; +import org.zstack.header.message.NeedReplyMessage; + +/** + * Request to define secret on KVM host (for VM e.g. vTPM). Caller provides plaintext DEK (dekBase64). + * Host seals it with host public key (HPKE) and sends envelope to agent. + */ +public class SecretHostDefineMsg extends NeedReplyMessage implements HostMessage { + private String hostUuid; + private String dekBase64; + + @Override + public String getHostUuid() { + return hostUuid; + } + + public void setHostUuid(String hostUuid) { + this.hostUuid = hostUuid; + } + + public String getDekBase64() { + return dekBase64; + } + + public void setDekBase64(String dekBase64) { + this.dekBase64 = dekBase64; + } +} \ No newline at end of file diff --git a/header/src/main/java/org/zstack/header/secret/SecretHostDefineReply.java b/header/src/main/java/org/zstack/header/secret/SecretHostDefineReply.java new file mode 100644 index 00000000000..2149bbdb3ba --- /dev/null +++ b/header/src/main/java/org/zstack/header/secret/SecretHostDefineReply.java @@ -0,0 +1,28 @@ +package org.zstack.header.secret; + +import org.zstack.header.message.MessageReply; + +/** Reply for SecretHostDefineMsg (define secret on host for VM e.g. vTPM). 
*/ +public class SecretHostDefineReply extends MessageReply { + public static final String ERROR_CODE_KEYS_NOT_ON_DISK = "KEY_AGENT_KEYS_NOT_ON_DISK"; + public static final String ERROR_CODE_KEY_FILES_INTEGRITY_MISMATCH = "KEY_AGENT_KEY_FILES_INTEGRITY_MISMATCH"; + + private String errorCode; + private String secretUuid; + + public String getErrorCode() { + return errorCode; + } + + public void setErrorCode(String errorCode) { + this.errorCode = errorCode; + } + + public String getSecretUuid() { + return secretUuid; + } + + public void setSecretUuid(String secretUuid) { + this.secretUuid = secretUuid; + } +} diff --git a/plugin/kvm/pom.xml b/plugin/kvm/pom.xml index 4cabba3d897..c0f484d0bc7 100755 --- a/plugin/kvm/pom.xml +++ b/plugin/kvm/pom.xml @@ -4,11 +4,16 @@ plugin org.zstack - 4.10.0 + 4.10.0 .. kvm + + org.bouncycastle + bcprov-jdk15on + 1.67 + org.zstack compute diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java new file mode 100644 index 00000000000..21dc57a2fc7 --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java @@ -0,0 +1,132 @@ +package org.zstack.kvm; + +import org.bouncycastle.crypto.InvalidCipherTextException; +import org.bouncycastle.crypto.agreement.X25519Agreement; +import org.bouncycastle.crypto.engines.AESEngine; +import org.bouncycastle.crypto.generators.HKDFBytesGenerator; +import org.bouncycastle.crypto.generators.X25519KeyPairGenerator; +import org.bouncycastle.crypto.modes.GCMBlockCipher; +import org.bouncycastle.crypto.params.AEADParameters; +import org.bouncycastle.crypto.params.KeyParameter; +import org.bouncycastle.crypto.params.X25519KeyGenerationParameters; +import org.bouncycastle.crypto.params.X25519PublicKeyParameters; +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.digests.SHA256Digest; +import org.bouncycastle.crypto.AsymmetricCipherKeyPair; + +import 
java.security.SecureRandom; +import java.util.Arrays; + +/** + * HPKE seal (RFC 9180) compatible with Go: KEM_X25519_HKDF_SHA256, KDF_HKDF_SHA256, AEAD_AES256GCM. + * Seal: encrypt wrapper DEK with host public key; output = enc (32) || ciphertext (for agent to open with private key). + */ +public final class HostSecretEnvelopeCrypto { + private static final String HPKE_V1 = "HPKE-v1"; + private static final byte[] KEM_ID = new byte[]{0x00, 0x20}; // X25519 HKDF-SHA256 + private static final byte[] KDF_ID = new byte[]{0x00, 0x01}; // HKDF-SHA256 + private static final byte[] AEAD_ID = new byte[]{0x00, 0x02}; // AES-256-GCM + private static final byte[] KEM_SUITE_ID = concat("KEM".getBytes(), KEM_ID); // for DHKEM ExtractAndExpand + private static final byte[] SUITE_ID = concat(concat("HPKE".getBytes(), KEM_ID), concat(KDF_ID, AEAD_ID)); + private static final int NH = 32; + private static final int NK = 32; + private static final int NN = 12; + + private static byte[] concat(byte[] a, byte[] b) { + byte[] r = new byte[a.length + b.length]; + System.arraycopy(a, 0, r, 0, a.length); + System.arraycopy(b, 0, r, a.length, b.length); + return r; + } + + private static byte[] i2osp(int n, int w) { + byte[] r = new byte[w]; + for (int i = w - 1; i >= 0; i--) { + r[i] = (byte) (n & 0xff); + n >>= 8; + } + return r; + } + + /** RFC 9180 LabeledExtract(salt, label, ikm); use kemSuiteId for KEM layer, null for HPKE layer (uses SUITE_ID). */ + private static byte[] labeledExtract(byte[] salt, String label, byte[] ikm, Digest digest, byte[] suiteId) { + byte[] sid = suiteId != null ? suiteId : SUITE_ID; + byte[] labeledIkm = concat(concat(concat(HPKE_V1.getBytes(), sid), label.getBytes()), ikm != null ? ikm : new byte[0]); + return hkdfExtract(salt, labeledIkm, digest); + } + + /** RFC 9180 LabeledExpand(prk, label, info, L); use kemSuiteId for KEM layer, null for HPKE layer. 
*/ + private static byte[] labeledExpand(byte[] prk, String label, byte[] info, int L, Digest digest, byte[] suiteId) { + byte[] sid = suiteId != null ? suiteId : SUITE_ID; + byte[] labeledInfo = concat(concat(concat(concat(i2osp(L, 2), HPKE_V1.getBytes()), sid), label.getBytes()), info != null ? info : new byte[0]); + return hkdfExpand(prk, labeledInfo, L, digest); + } + + private static byte[] hkdfExtract(byte[] salt, byte[] ikm, Digest digest) { + HKDFBytesGenerator gen = new HKDFBytesGenerator(digest); + gen.init(new org.bouncycastle.crypto.params.HKDFParameters(ikm, salt != null ? salt : new byte[NH], null)); + byte[] out = new byte[NH]; + gen.generateBytes(out, 0, out.length); + return out; + } + + private static byte[] hkdfExpand(byte[] prk, byte[] info, int L, Digest digest) { + HKDFBytesGenerator gen = new HKDFBytesGenerator(digest); + gen.init(new org.bouncycastle.crypto.params.HKDFParameters(prk, null, info)); + byte[] out = new byte[L]; + gen.generateBytes(out, 0, out.length); + return out; + } + + /** + * Seal plaintext with recipient's X25519 public key (raw 32 bytes). + * Returns envelope = enc (32 bytes) || ciphertext (AEAD output). + */ + public static byte[] seal(byte[] recipientPublicKey, byte[] plaintext) throws InvalidCipherTextException { + if (recipientPublicKey == null || recipientPublicKey.length != 32 || plaintext == null) { + throw new IllegalArgumentException("recipientPublicKey must be 32 bytes, plaintext non-null"); + } + SecureRandom random = new SecureRandom(); + Digest digest = new SHA256Digest(); + + // 1. Generate ephemeral X25519 key pair (BC crypto) + X25519KeyPairGenerator kpg = new X25519KeyPairGenerator(); + kpg.init(new X25519KeyGenerationParameters(random)); + AsymmetricCipherKeyPair ephemeralKp = kpg.generateKeyPair(); + X25519PublicKeyParameters ephemeralPub = (X25519PublicKeyParameters) ephemeralKp.getPublic(); + byte[] enc = ephemeralPub.getEncoded(); + + // 2. 
DH shared secret (ephemeral priv, recipient pub) + X25519PublicKeyParameters recipientPub = new X25519PublicKeyParameters(recipientPublicKey, 0); + X25519Agreement agreement = new X25519Agreement(); + agreement.init(ephemeralKp.getPrivate()); + byte[] sharedSecret = new byte[32]; + agreement.calculateAgreement(recipientPub, sharedSecret, 0); + + // 3. KEM shared_secret (DHKEM ExtractAndExpand) with KEM suite_id + byte[] kemContext = concat(enc, recipientPublicKey); + byte[] eaePrk = labeledExtract(new byte[0], "eae_prk", sharedSecret, digest, KEM_SUITE_ID); + byte[] kemSharedSecret = labeledExpand(eaePrk, "shared_secret", kemContext, NH, digest, KEM_SUITE_ID); + + // 4. Key schedule (base mode, empty psk, empty info) with HPKE suite_id + byte[] pskIdHash = labeledExtract(new byte[0], "psk_id_hash", new byte[0], digest, null); + byte[] infoHash = labeledExtract(new byte[0], "info_hash", new byte[0], digest, null); + byte[] keyScheduleContext = concat(new byte[]{0x00}, concat(pskIdHash, infoHash)); + byte[] secret = labeledExtract(kemSharedSecret, "secret", new byte[0], digest, null); + byte[] key = labeledExpand(secret, "key", keyScheduleContext, NK, digest, null); + byte[] baseNonce = labeledExpand(secret, "base_nonce", keyScheduleContext, NN, digest, null); + + // 5. 
AEAD Seal (AES-256-GCM, empty aad) + GCMBlockCipher cipher = new GCMBlockCipher(new AESEngine()); + cipher.init(true, new AEADParameters(new KeyParameter(key), 128, baseNonce)); + byte[] out = new byte[cipher.getOutputSize(plaintext.length)]; + int len = cipher.processBytes(plaintext, 0, plaintext.length, out, 0); + len += cipher.doFinal(out, len); + byte[] ct = Arrays.copyOf(out, len); + + return concat(enc, ct); + } + + private HostSecretEnvelopeCrypto() { + } +} \ No newline at end of file diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index 63a2f9759e7..1efc52b0151 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -372,6 +372,90 @@ public void setQemuVersion(String qemuVersion) { } } + public static class CreatePublicKeyCmd extends AgentCommand { + } + + public static class CreatePublicKeyResponse extends AgentResponse { + } + + public static class GetPublicKeyCmd extends AgentCommand { + } + + public static class GetPublicKeyResponse extends AgentResponse { + private String publicKey; + private String errorCode; + + public String getPublicKey() { + return publicKey; + } + + public void setPublicKey(String publicKey) { + this.publicKey = publicKey; + } + + public String getErrorCode() { + return errorCode; + } + + public void setErrorCode(String errorCode) { + this.errorCode = errorCode; + } + } + + public static class RotatePublicKeyCmd extends AgentCommand { + } + + public static class RotatePublicKeyResponse extends AgentResponse { + } + + public static class VerifyPublicKeyCmd extends AgentCommand { + } + + public static class VerifyPublicKeyResponse extends AgentResponse { + private String errorCode; + + public String getErrorCode() { + return errorCode; + } + + public void setErrorCode(String errorCode) { + this.errorCode = errorCode; + } + } + + public static class 
SecretHostDefineCmd extends AgentCommand { + private String envelopeDekBase64; + + public String getEnvelopeDekBase64() { + return envelopeDekBase64; + } + + public void setEnvelopeDekBase64(String envelopeDekBase64) { + this.envelopeDekBase64 = envelopeDekBase64; + } + } + + public static class SecretHostDefineResponse extends AgentResponse { + private String errorCode; + private String secretUuid; + + public String getErrorCode() { + return errorCode; + } + + public void setErrorCode(String errorCode) { + this.errorCode = errorCode; + } + + public String getSecretUuid() { + return secretUuid; + } + + public void setSecretUuid(String secretUuid) { + this.secretUuid = secretUuid; + } + } + public static class PingCmd extends AgentCommand { public String hostUuid; public Map configs; diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java index 575d54016c2..7355ed99f3b 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java @@ -122,6 +122,12 @@ public interface KVMConstant { String KVM_UPDATE_HOST_NQN_PATH = "/host/nqn/update"; String KVM_UPDATE_HOSTNAME_PATH = "/host/hostname/update"; + String KVM_CREATE_ENVELOPE_KEY_PATH = "/host/key/envelope/createEnvelopeKey"; + String KVM_GET_ENVELOPE_KEY_PATH = "/host/key/envelope/getEnvelopePublicKey"; + String KVM_ROTATE_ENVELOPE_KEY_PATH = "/host/key/envelope/rotateEnvelopeKey"; + String KVM_VERIFY_ENVELOPE_KEY_PATH = "/host/key/envelope/checkEnvelopeKey"; + String KVM_ENSURE_SECRET_PATH = "/host/key/envelope/ensureSecret"; + String KVM_HOST_FILE_DOWNLOAD_PATH = "/host/file/download"; String KVM_HOST_FILE_UPLOAD_PATH = "/host/file/upload"; String KVM_HOST_FILE_DOWNLOAD_PROGRESS_PATH = "/host/file/progress"; diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index adcfc545808..0a5b85056fc 100755 --- 
a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -55,6 +55,8 @@ import org.zstack.header.exception.CloudRuntimeException; import org.zstack.header.host.*; import org.zstack.header.host.MigrateVmOnHypervisorMsg.StorageMigrationPolicy; +import org.zstack.header.secret.SecretHostDefineMsg; +import org.zstack.header.secret.SecretHostDefineReply; import org.zstack.header.message.APIMessage; import org.zstack.header.message.Message; import org.zstack.header.message.MessageReply; @@ -748,6 +750,8 @@ protected void handleLocalMessage(Message msg) { handle((GetFileDownloadProgressMsg) msg); } else if (msg instanceof RestartKvmAgentMsg) { handle((RestartKvmAgentMsg) msg); + } else if (msg instanceof SecretHostDefineMsg) { + handle((SecretHostDefineMsg) msg); } else { super.handleLocalMessage(msg); } @@ -3861,6 +3865,23 @@ protected void stopVm(final StopVmOnHypervisorMsg msg, final NoErrorCompletion c checkStatus(); final VmInstanceInventory vminv = msg.getVmInventory(); + { + String dekBase64 = "dGVzdERFSw=="; + SecretHostDefineMsg defineMsg = new SecretHostDefineMsg(); + defineMsg.setHostUuid(getSelf().getUuid()); + defineMsg.setDekBase64(dekBase64); + bus.makeTargetServiceIdByResourceUuid(defineMsg, HostConstant.SERVICE_ID, getSelf().getUuid()); + MessageReply defineReply = bus.call(defineMsg); + if (!defineReply.isSuccess()) { + logger.warn(String.format("debug SecretDefine before stop vm[uuid:%s] failed: %s", vminv.getUuid(), defineReply.getError())); + } else { + SecretHostDefineReply srep = defineReply.castReply(); + if (srep != null && srep.getSecretUuid() != null) { + logger.info(String.format("debug SecretDefine before stop vm[uuid:%s] success, secretUuid=%s", vminv.getUuid(), srep.getSecretUuid())); + } + } + } + StopVmCmd cmd = new StopVmCmd(); cmd.setUuid(vminv.getUuid()); cmd.setType(msg.getType()); @@ -5283,6 +5304,21 @@ public void fail(ErrorCode errorCode) { } }); + flow(new NoRollbackFlow() 
{ + String __name__ = "sync-secret-key-after-ping"; + + @Override + public boolean skip(Map data) { + return data.get(KVMConstant.KVM_HOST_SKIP_PING_NO_FAILURE_EXTENSIONS) != null; + } + + @Override + public void run(FlowTrigger trigger, Map data) { + syncEnvelopeKeyAfterPing(); + trigger.next(); + } + }); + done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { @@ -5300,6 +5336,236 @@ public void handle(ErrorCode errCode, Map data) { }).start(); } + private void syncEnvelopeKeyAfterPing() { + KVMHostVO kvo = dbf.reload(getSelf()); + String hostUuid = kvo.getUuid(); + try { + HostKeyIdentityVO identity = getHostKeyIdentity(hostUuid); + if (identity != null && StringUtils.isNotBlank(identity.getPublicKey())) { + String verifyUrl = buildUrl(KVMConstant.KVM_VERIFY_ENVELOPE_KEY_PATH); + KVMAgentCommands.VerifyPublicKeyResponse vrsp = restf.syncJsonPost(verifyUrl, + new KVMAgentCommands.VerifyPublicKeyCmd(), KVMAgentCommands.VerifyPublicKeyResponse.class); + if (vrsp != null && vrsp.isSuccess()) { + setHostKeyIdentityVerified(hostUuid, true); + return; + } + if (vrsp != null && !vrsp.isSuccess() && isRotateNeededGetError(vrsp.getErrorCode())) { + String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); + KVMAgentCommands.RotatePublicKeyResponse rotateRsp = restf.syncJsonPost(rotateUrl, + new KVMAgentCommands.RotatePublicKeyCmd(), KVMAgentCommands.RotatePublicKeyResponse.class); + if (rotateRsp.isSuccess()) { + String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); + KVMAgentCommands.GetPublicKeyResponse getRsp = restf.syncJsonPost(getUrl, + new KVMAgentCommands.GetPublicKeyCmd(), KVMAgentCommands.GetPublicKeyResponse.class); + if (getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { + saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); + return; + } + } else { + logger.warn("verify failed then rotate key on agent failed for host " + hostUuid + ": " + rotateRsp.getError()); + } + } 
+ setHostKeyIdentityVerified(hostUuid, false); + return; + } + String createUrl = buildUrl(KVMConstant.KVM_CREATE_ENVELOPE_KEY_PATH); + KVMAgentCommands.CreatePublicKeyResponse createRsp = restf.syncJsonPost(createUrl, + new KVMAgentCommands.CreatePublicKeyCmd(), KVMAgentCommands.CreatePublicKeyResponse.class); + if (!createRsp.isSuccess()) { + logger.warn("create key on agent failed for host " + hostUuid + ": " + createRsp.getError()); + setHostKeyIdentityVerified(hostUuid, false); + return; + } + String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); + KVMAgentCommands.GetPublicKeyResponse getRsp = restf.syncJsonPost(getUrl, + new KVMAgentCommands.GetPublicKeyCmd(), KVMAgentCommands.GetPublicKeyResponse.class); + if (getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { + saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); + return; + } + if (!getRsp.isSuccess() && isRotateNeededGetError(getRsp.getErrorCode())) { + String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); + KVMAgentCommands.RotatePublicKeyResponse rotateRsp = restf.syncJsonPost(rotateUrl, + new KVMAgentCommands.RotatePublicKeyCmd(), KVMAgentCommands.RotatePublicKeyResponse.class); + if (!rotateRsp.isSuccess()) { + logger.warn("rotate key on agent failed for host " + hostUuid + ": " + rotateRsp.getError()); + setHostKeyIdentityVerified(hostUuid, false); + return; + } + getRsp = restf.syncJsonPost(getUrl, new KVMAgentCommands.GetPublicKeyCmd(), KVMAgentCommands.GetPublicKeyResponse.class); + if (getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { + saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); + return; + } + } + logger.warn("get public key from agent failed for host " + hostUuid + ": " + (getRsp != null ? 
getRsp.getError() : "null")); + setHostKeyIdentityVerified(hostUuid, false); + } catch (Exception e) { + logger.warn("sync secret key after connect failed for host " + hostUuid + ": " + e.getMessage()); + try { + setHostKeyIdentityVerified(hostUuid, false); + } catch (Exception ignored) { + } + } + } + + private void setHostKeyIdentityVerified(String hostUuid, boolean verified) { + HostKeyIdentityVO vo = getHostKeyIdentity(hostUuid); + if (vo != null) { + vo.setVerified(verified); + dbf.update(vo); + } + } + + private static boolean isRotateNeededGetError(String errorCode) { + if (errorCode == null) return false; + return SecretHostDefineReply.ERROR_CODE_KEYS_NOT_ON_DISK.equals(errorCode) + || SecretHostDefineReply.ERROR_CODE_KEY_FILES_INTEGRITY_MISMATCH.equals(errorCode); + } + + private HostKeyIdentityVO getHostKeyIdentity(String hostUuid) { + SimpleQuery q = dbf.createQuery(HostKeyIdentityVO.class); + q.add(HostKeyIdentityVO_.hostUuid, Op.EQ, hostUuid); + return q.find(); + } + + private void saveOrUpdateHostKeyIdentity(String hostUuid, String publicKey, boolean verified) { + if (StringUtils.isBlank(publicKey)) { + return; + } + HostKeyIdentityVO vo = getHostKeyIdentity(hostUuid); + if (vo == null) { + vo = new HostKeyIdentityVO(); + vo.setHostUuid(hostUuid); + vo.setFingerprint(""); + vo.setCreateDate(new java.sql.Timestamp(System.currentTimeMillis())); + dbf.persist(vo); + } + vo.setPublicKey(publicKey.trim()); + vo.setVerified(verified); + dbf.update(vo); + } + + private void handle(SecretHostDefineMsg msg) { + SecretHostDefineReply reply = new SecretHostDefineReply(); + if (org.apache.commons.lang.StringUtils.isBlank(msg.getDekBase64())) { + reply.setError(operr("dekBase64 is required")); + bus.reply(msg, reply); + return; + } + String hostUuid = getSelf().getUuid(); + HostKeyIdentityVO identity = getHostKeyIdentity(hostUuid); + String pubKey = identity != null ? 
org.apache.commons.lang.StringUtils.trimToNull(identity.getPublicKey()) : null; + Boolean verifyOk = identity != null ? identity.getVerified() : null; + if (pubKey == null) { + reply.setError(operr("no public key for host, connect/reconnect did not sync key")); + bus.reply(msg, reply); + return; + } + if (!Boolean.TRUE.equals(verifyOk)) { + reply.setError(operr("host secret key verify not ok, not synced")); + bus.reply(msg, reply); + return; + } + byte[] dekRaw; + try { + dekRaw = java.util.Base64.getDecoder().decode(msg.getDekBase64().trim()); + } catch (IllegalArgumentException e) { + reply.setError(operr("invalid dekBase64: %s", e.getMessage())); + bus.reply(msg, reply); + return; + } + if (dekRaw == null || dekRaw.length == 0) { + reply.setError(operr("dekBase64 decoded to empty")); + bus.reply(msg, reply); + return; + } + + byte[] pubKeyBytes; + try { + pubKeyBytes = java.util.Base64.getDecoder().decode(pubKey); + } catch (IllegalArgumentException e) { + reply.setError(operr("invalid host public key in DB: %s", e.getMessage())); + bus.reply(msg, reply); + return; + } + if (pubKeyBytes == null || pubKeyBytes.length != 32) { + reply.setError(operr("host public key must be 32 bytes (X25519)")); + bus.reply(msg, reply); + return; + } + byte[] envelope; + try { + envelope = HostSecretEnvelopeCrypto.seal(pubKeyBytes, dekRaw); + } catch (org.bouncycastle.crypto.InvalidCipherTextException e) { + reply.setError(operr("HPKE seal failed: %s", e.getMessage())); + bus.reply(msg, reply); + return; + } + String envelopeDekBase64 = java.util.Base64.getEncoder().encodeToString(envelope); + try { + String url = buildUrl(KVMConstant.KVM_ENSURE_SECRET_PATH); + KVMAgentCommands.SecretHostDefineCmd cmd = new KVMAgentCommands.SecretHostDefineCmd(); + cmd.setEnvelopeDekBase64(envelopeDekBase64); + KVMAgentCommands.SecretHostDefineResponse rsp = restf.syncJsonPost(url, cmd, KVMAgentCommands.SecretHostDefineResponse.class); + if (rsp.isSuccess()) { + if (rsp.getSecretUuid() != null) { 
+ reply.setSecretUuid(rsp.getSecretUuid()); + } + bus.reply(msg, reply); + return; + } + if (isRotateNeededGetError(rsp.getErrorCode())) { + String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); + KVMAgentCommands.RotatePublicKeyResponse rotateRsp = restf.syncJsonPost(rotateUrl, + new KVMAgentCommands.RotatePublicKeyCmd(), KVMAgentCommands.RotatePublicKeyResponse.class); + if (!rotateRsp.isSuccess()) { + reply.setError(operr("ensure secret failed, rotate key then retry failed: %s", rotateRsp.getError())); + bus.reply(msg, reply); + return; + } + String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); + KVMAgentCommands.GetPublicKeyResponse getRsp = restf.syncJsonPost(getUrl, + new KVMAgentCommands.GetPublicKeyCmd(), KVMAgentCommands.GetPublicKeyResponse.class); + if (!getRsp.isSuccess() || StringUtils.isBlank(getRsp.getPublicKey())) { + reply.setError(operr("ensure secret failed, rotate then get public key failed: %s", + getRsp != null ? getRsp.getError() : "null")); + bus.reply(msg, reply); + return; + } + saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); + String newPubKey = getRsp.getPublicKey().trim(); + byte[] newPubKeyBytes = java.util.Base64.getDecoder().decode(newPubKey); + byte[] newEnvelope; + try { + newEnvelope = HostSecretEnvelopeCrypto.seal(newPubKeyBytes, dekRaw); + } catch (org.bouncycastle.crypto.InvalidCipherTextException e) { + reply.setError(operr("ensure secret failed after rotate, HPKE seal failed: %s", e.getMessage())); + bus.reply(msg, reply); + return; + } + String newEnvelopeDekBase64 = java.util.Base64.getEncoder().encodeToString(newEnvelope); + cmd.setEnvelopeDekBase64(newEnvelopeDekBase64); + rsp = restf.syncJsonPost(url, cmd, KVMAgentCommands.SecretHostDefineResponse.class); + if (rsp.isSuccess()) { + if (rsp.getSecretUuid() != null) { + reply.setSecretUuid(rsp.getSecretUuid()); + } + bus.reply(msg, reply); + return; + } + } + reply.setError(operr(rsp.getError())); + if 
(rsp.getErrorCode() != null) { + reply.setErrorCode(rsp.getErrorCode()); + } + bus.reply(msg, reply); + } catch (RestClientException e) { + reply.setError(operr("ensure secret on agent failed: %s", e.getMessage())); + bus.reply(msg, reply); + } + } + @Override protected void deleteTakeOverFlag(Completion completion) { if (CoreGlobalProperty.UNIT_TEST_ON) { diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy new file mode 100644 index 00000000000..86708eba7d4 --- /dev/null +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy @@ -0,0 +1,149 @@ +package org.zstack.test.integration.kvm.host + +import org.zstack.core.Platform +import org.zstack.core.cloudbus.CloudBus +import org.zstack.header.host.AddHostReply +import org.zstack.header.host.HostConstant +import org.zstack.header.host.HostInventory +import org.zstack.header.host.HostStatus +import org.zstack.header.message.MessageReply +import org.zstack.kvm.AddKVMHostMsg +import org.zstack.kvm.KVMConstant +import org.zstack.kvm.KVMAgentCommands +import org.zstack.storage.primary.local.LocalStorageKvmBackend +import org.zstack.test.integration.kvm.KvmTest +import org.zstack.testlib.EnvSpec +import org.zstack.testlib.SubCase +import org.zstack.header.secret.SecretHostDefineMsg +import org.zstack.header.secret.SecretHostDefineReply + +/** + * Integration test for host secret: create/get/rotate/verify public key on connect, + * and SecretHostDefine (ensure secret on agent). + * Uses simulated agent for all secret paths. + */ +class HostSecretCase extends SubCase { + EnvSpec env + def cluster + CloudBus bus + HostInventory addedHost + + /** 32-byte X25519 public key (base64) for simulator; must be valid for HPKE seal. 
*/ + static final String MOCK_PUBLIC_KEY_BASE64 = "AQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyA=" + + @Override + void setup() { + useSpring(KvmTest.springSpec) + } + + @Override + void environment() { + env = HostEnv.noHostBasicEnv() + } + + @Override + void test() { + env.create { + prepare() + testAddHostWithSecretSync() + testSecretHostDefineSuccess() + testSecretHostDefineFailWhenNoDek() + } + } + + @Override + void clean() { + env.delete() + } + + void prepare() { + cluster = env.inventoryByName("cluster") + bus = bean(CloudBus.class) + } + + void registerSecretSimulators() { + env.simulator(KVMConstant.KVM_CONNECT_PATH) { + def rsp = new KVMAgentCommands.ConnectResponse() + rsp.success = true + rsp.libvirtVersion = "1.0.0" + rsp.qemuVersion = "1.3.0" + return rsp + } + env.simulator(KVMConstant.KVM_HOST_FACT_PATH) { + def rsp = new KVMAgentCommands.HostFactResponse() + rsp.osDistribution = "CentOS" + rsp.osVersion = "7.0" + return rsp + } + env.simulator(LocalStorageKvmBackend.INIT_PATH) { rsp, _ -> return rsp } + + env.simulator(KVMConstant.KVM_CREATE_ENVELOPE_KEY_PATH) { + return new KVMAgentCommands.CreatePublicKeyResponse() + } + env.simulator(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH) { + def rsp = new KVMAgentCommands.GetPublicKeyResponse() + rsp.publicKey = MOCK_PUBLIC_KEY_BASE64 + return rsp + } + env.simulator(KVMConstant.KVM_VERIFY_ENVELOPE_KEY_PATH) { + return new KVMAgentCommands.VerifyPublicKeyResponse() + } + env.simulator(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH) { + return new KVMAgentCommands.RotatePublicKeyResponse() + } + env.simulator(KVMConstant.KVM_ENSURE_SECRET_PATH) { + def rsp = new KVMAgentCommands.SecretHostDefineResponse() + rsp.secretUuid = Platform.uuid + return rsp + } + } + + void testAddHostWithSecretSync() { + registerSecretSimulators() + + AddKVMHostMsg amsg = new AddKVMHostMsg() + amsg.accountUuid = loginAsAdmin().accountUuid + amsg.name = "kvm" + amsg.managementIp = "127.0.0.2" + amsg.resourceUuid = Platform.uuid + 
amsg.clusterUuid = cluster.uuid + amsg.setPassword("password") + amsg.setUsername("root") + + bus.makeLocalServiceId(amsg, HostConstant.SERVICE_ID) + AddHostReply reply = (AddHostReply) bus.call(amsg) + assert reply != null + assert reply.isSuccess() + assert reply.inventory.status == HostStatus.Connected.toString() + addedHost = reply.inventory + } + + void testSecretHostDefineSuccess() { + assert addedHost != null + + SecretHostDefineMsg msg = new SecretHostDefineMsg() + msg.hostUuid = addedHost.uuid + msg.dekBase64 = "dGVzdERFSw==" // base64 of "testDEK" + + bus.makeTargetServiceIdByResourceUuid(msg, HostConstant.SERVICE_ID, addedHost.uuid) + MessageReply reply = bus.call(msg) + assert reply != null + assert reply.isSuccess() + SecretHostDefineReply defineReply = reply.castReply() + assert defineReply.secretUuid != null + } + + void testSecretHostDefineFailWhenNoDek() { + assert addedHost != null + + SecretHostDefineMsg msg = new SecretHostDefineMsg() + msg.hostUuid = addedHost.uuid + msg.dekBase64 = null + + bus.makeTargetServiceIdByResourceUuid(msg, HostConstant.SERVICE_ID, addedHost.uuid) + MessageReply reply = bus.call(msg) + assert reply != null + assert !reply.isSuccess() + assert reply.error != null + } +} \ No newline at end of file From a0ffd8dd14adf56ab796fd34aa7f7fdef9c37f77 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 14:18:24 +0800 Subject: [PATCH 41/76] Add verified to HostKeyIdentityVO --- .../org/zstack/header/host/HostKeyIdentityVO.java | 11 +++++++++++ .../org/zstack/header/host/HostKeyIdentityVO_.java | 1 + 2 files changed, 12 insertions(+) diff --git a/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java index b37e6a8ce84..e77873d70fe 100644 --- a/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java +++ b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java @@ -25,6 +25,9 @@ public class HostKeyIdentityVO { 
@Column private String fingerprint; + @Column(nullable = false) + private Boolean verified = false; + @Column private Timestamp createDate; @@ -60,6 +63,14 @@ public void setFingerprint(String fingerprint) { this.fingerprint = fingerprint; } + public Boolean getVerified() { + return verified; + } + + public void setVerified(Boolean verified) { + this.verified = verified; + } + public Timestamp getCreateDate() { return createDate; } diff --git a/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO_.java b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO_.java index a43d01375a0..a2bcfa7b850 100644 --- a/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO_.java +++ b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO_.java @@ -9,6 +9,7 @@ public class HostKeyIdentityVO_ { public static volatile SingularAttribute hostUuid; public static volatile SingularAttribute publicKey; public static volatile SingularAttribute fingerprint; + public static volatile SingularAttribute verified; public static volatile SingularAttribute createDate; public static volatile SingularAttribute lastOpDate; } From 0462306c07df2538c6ae2395c5b7812dbc65156a Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 14:21:13 +0800 Subject: [PATCH 42/76] Rename sync-envelope-public-key --- plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 0a5b85056fc..3b31d82c4b9 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5305,7 +5305,7 @@ public void fail(ErrorCode errorCode) { }); flow(new NoRollbackFlow() { - String __name__ = "sync-secret-key-after-ping"; + String __name__ = "sync-envelope-public-key"; @Override public boolean skip(Map data) { From 0150b21da4bd3b5bc10354fcab4257c273a14278 Mon Sep 
17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 14:43:34 +0800 Subject: [PATCH 43/76] Place holder public key --- plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 3b31d82c4b9..87b29a8b0a2 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5433,15 +5433,19 @@ private void saveOrUpdateHostKeyIdentity(String hostUuid, String publicKey, bool if (StringUtils.isBlank(publicKey)) { return; } + String keyToSave = publicKey.trim(); HostKeyIdentityVO vo = getHostKeyIdentity(hostUuid); if (vo == null) { vo = new HostKeyIdentityVO(); vo.setHostUuid(hostUuid); + vo.setPublicKey(keyToSave); vo.setFingerprint(""); + vo.setVerified(verified); vo.setCreateDate(new java.sql.Timestamp(System.currentTimeMillis())); dbf.persist(vo); + return; } - vo.setPublicKey(publicKey.trim()); + vo.setPublicKey(keyToSave); vo.setVerified(verified); dbf.update(vo); } From 974ca9f085bb96c43eeaaa14e689aa036ca530d0 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 14:44:41 +0800 Subject: [PATCH 44/76] Place holder public key --- plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 87b29a8b0a2..28a4bb07227 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5438,9 +5438,9 @@ private void saveOrUpdateHostKeyIdentity(String hostUuid, String publicKey, bool if (vo == null) { vo = new HostKeyIdentityVO(); vo.setHostUuid(hostUuid); - vo.setPublicKey(keyToSave); + vo.setPublicKey(""); vo.setFingerprint(""); - vo.setVerified(verified); + vo.setVerified(false); 
vo.setCreateDate(new java.sql.Timestamp(System.currentTimeMillis())); dbf.persist(vo); return; From 896bdf0f95475765aacadc5263cc56e32eaf2825 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 14:45:39 +0800 Subject: [PATCH 45/76] Place holder public key --- plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 28a4bb07227..87b29a8b0a2 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5438,9 +5438,9 @@ private void saveOrUpdateHostKeyIdentity(String hostUuid, String publicKey, bool if (vo == null) { vo = new HostKeyIdentityVO(); vo.setHostUuid(hostUuid); - vo.setPublicKey(""); + vo.setPublicKey(keyToSave); vo.setFingerprint(""); - vo.setVerified(false); + vo.setVerified(verified); vo.setCreateDate(new java.sql.Timestamp(System.currentTimeMillis())); dbf.persist(vo); return; From f42e62d988d9b0e61885bb40c30df9d9615c2800 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 15:30:24 +0800 Subject: [PATCH 46/76] Remove rotate public key in initial pingHook --- .../secret/SecretCreateExtensionPoint.java | 0 .../src/main/java/org/zstack/kvm/KVMHost.java | 24 ------------------- 2 files changed, 24 deletions(-) rename "header/src/main/java/org/zstack/header/secret/SecretCreateExt \342\200\216ensionPoint.java" => header/src/main/java/org/zstack/header/secret/SecretCreateExtensionPoint.java (100%) diff --git "a/header/src/main/java/org/zstack/header/secret/SecretCreateExt \342\200\216ensionPoint.java" b/header/src/main/java/org/zstack/header/secret/SecretCreateExtensionPoint.java similarity index 100% rename from "header/src/main/java/org/zstack/header/secret/SecretCreateExt \342\200\216ensionPoint.java" rename to header/src/main/java/org/zstack/header/secret/SecretCreateExtensionPoint.java 
diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 87b29a8b0a2..607f65c5737 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5376,30 +5376,6 @@ private void syncEnvelopeKeyAfterPing() { setHostKeyIdentityVerified(hostUuid, false); return; } - String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); - KVMAgentCommands.GetPublicKeyResponse getRsp = restf.syncJsonPost(getUrl, - new KVMAgentCommands.GetPublicKeyCmd(), KVMAgentCommands.GetPublicKeyResponse.class); - if (getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { - saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); - return; - } - if (!getRsp.isSuccess() && isRotateNeededGetError(getRsp.getErrorCode())) { - String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); - KVMAgentCommands.RotatePublicKeyResponse rotateRsp = restf.syncJsonPost(rotateUrl, - new KVMAgentCommands.RotatePublicKeyCmd(), KVMAgentCommands.RotatePublicKeyResponse.class); - if (!rotateRsp.isSuccess()) { - logger.warn("rotate key on agent failed for host " + hostUuid + ": " + rotateRsp.getError()); - setHostKeyIdentityVerified(hostUuid, false); - return; - } - getRsp = restf.syncJsonPost(getUrl, new KVMAgentCommands.GetPublicKeyCmd(), KVMAgentCommands.GetPublicKeyResponse.class); - if (getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { - saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); - return; - } - } - logger.warn("get public key from agent failed for host " + hostUuid + ": " + (getRsp != null ? 
getRsp.getError() : "null")); - setHostKeyIdentityVerified(hostUuid, false); } catch (Exception e) { logger.warn("sync secret key after connect failed for host " + hostUuid + ": " + e.getMessage()); try { From 30412aca6eefbee9ced8c0c4678ebfad99fdbac0 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 16:00:20 +0800 Subject: [PATCH 47/76] Add variable to SecretHostDefineCmd --- .../header/secret/SecretHostDefineMsg.java | 37 +++++++++++++++ .../java/org/zstack/kvm/KVMAgentCommands.java | 46 +++++++++++++++++-- .../src/main/java/org/zstack/kvm/KVMHost.java | 16 ++++++- 3 files changed, 93 insertions(+), 6 deletions(-) diff --git a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java index 1d868bd085e..4305af89491 100644 --- a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java +++ b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java @@ -6,10 +6,15 @@ /** * Request to define secret on KVM host (for VM e.g. vTPM). Caller provides plaintext DEK (dekBase64). * Host seals it with host public key (HPKE) and sends envelope to agent. + * vmUuid, purpose, providerName are required by key-agent for DEK cache key. 
*/ public class SecretHostDefineMsg extends NeedReplyMessage implements HostMessage { private String hostUuid; private String dekBase64; + private String vmUuid; + private String purpose; + private String providerName; + private String description; @Override public String getHostUuid() { @@ -27,4 +32,36 @@ public String getDekBase64() { public void setDekBase64(String dekBase64) { this.dekBase64 = dekBase64; } + + public String getVmUuid() { + return vmUuid; + } + + public void setVmUuid(String vmUuid) { + this.vmUuid = vmUuid; + } + + public String getPurpose() { + return purpose; + } + + public void setPurpose(String purpose) { + this.purpose = purpose; + } + + public String getProviderName() { + return providerName; + } + + public void setProviderName(String providerName) { + this.providerName = providerName; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } } \ No newline at end of file diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index 1efc52b0151..eb121f58c32 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -425,13 +425,51 @@ public void setErrorCode(String errorCode) { public static class SecretHostDefineCmd extends AgentCommand { private String envelopeDekBase64; + /** Base64 wrapped DEK; agent expects this field name (encryptedDek). 
*/ + private String encryptedDek; + private String vmUuid; + private String purpose; + private String providerName; + private String description; + + public String getEncryptedDek() { + return encryptedDek; + } + + public void setEncryptedDek(String encryptedDek) { + this.encryptedDek = encryptedDek; + } + + public String getVmUuid() { + return vmUuid; + } + + public void setVmUuid(String vmUuid) { + this.vmUuid = vmUuid; + } + + public String getPurpose() { + return purpose; + } + + public void setPurpose(String purpose) { + this.purpose = purpose; + } + + public String getProviderName() { + return providerName; + } + + public void setProviderName(String providerName) { + this.providerName = providerName; + } - public String getEnvelopeDekBase64() { - return envelopeDekBase64; + public String getDescription() { + return description; } - public void setEnvelopeDekBase64(String envelopeDekBase64) { - this.envelopeDekBase64 = envelopeDekBase64; + public void setDescription(String description) { + this.description = description; } } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 607f65c5737..1e841b7d2d1 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -3870,6 +3870,9 @@ protected void stopVm(final StopVmOnHypervisorMsg msg, final NoErrorCompletion c SecretHostDefineMsg defineMsg = new SecretHostDefineMsg(); defineMsg.setHostUuid(getSelf().getUuid()); defineMsg.setDekBase64(dekBase64); + defineMsg.setVmUuid(vminv.getUuid()); + defineMsg.setPurpose("vm"); + defineMsg.setProviderName("zstack"); bus.makeTargetServiceIdByResourceUuid(defineMsg, HostConstant.SERVICE_ID, getSelf().getUuid()); MessageReply defineReply = bus.call(defineMsg); if (!defineReply.isSuccess()) { @@ -5433,6 +5436,11 @@ private void handle(SecretHostDefineMsg msg) { bus.reply(msg, reply); return; } + if (StringUtils.isBlank(msg.getVmUuid()) || 
StringUtils.isBlank(msg.getPurpose()) || StringUtils.isBlank(msg.getProviderName())) { + reply.setError(operr("vmUuid, purpose and providerName are required for ensure secret")); + bus.reply(msg, reply); + return; + } String hostUuid = getSelf().getUuid(); HostKeyIdentityVO identity = getHostKeyIdentity(hostUuid); String pubKey = identity != null ? org.apache.commons.lang.StringUtils.trimToNull(identity.getPublicKey()) : null; @@ -5486,7 +5494,11 @@ private void handle(SecretHostDefineMsg msg) { try { String url = buildUrl(KVMConstant.KVM_ENSURE_SECRET_PATH); KVMAgentCommands.SecretHostDefineCmd cmd = new KVMAgentCommands.SecretHostDefineCmd(); - cmd.setEnvelopeDekBase64(envelopeDekBase64); + cmd.setEncryptedDek(envelopeDekBase64); + cmd.setVmUuid(msg.getVmUuid()); + cmd.setPurpose(msg.getPurpose()); + cmd.setProviderName(msg.getProviderName()); + cmd.setDescription(msg.getDescription() != null ? msg.getDescription() : ""); KVMAgentCommands.SecretHostDefineResponse rsp = restf.syncJsonPost(url, cmd, KVMAgentCommands.SecretHostDefineResponse.class); if (rsp.isSuccess()) { if (rsp.getSecretUuid() != null) { @@ -5525,7 +5537,7 @@ private void handle(SecretHostDefineMsg msg) { return; } String newEnvelopeDekBase64 = java.util.Base64.getEncoder().encodeToString(newEnvelope); - cmd.setEnvelopeDekBase64(newEnvelopeDekBase64); + cmd.setEncryptedDek(newEnvelopeDekBase64); rsp = restf.syncJsonPost(url, cmd, KVMAgentCommands.SecretHostDefineResponse.class); if (rsp.isSuccess()) { if (rsp.getSecretUuid() != null) { From 09d5362a3f5c9e9077e45880ae6325429f5ca241 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 17:03:24 +0800 Subject: [PATCH 48/76] Remove rotate public key when define secret --- .../zstack/kvm/HostSecretEnvelopeCrypto.java | 37 ++++++++++++----- .../src/main/java/org/zstack/kvm/KVMHost.java | 40 ------------------- 2 files changed, 28 insertions(+), 49 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java 
b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java index 21dc57a2fc7..acaca72d4f1 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java @@ -5,8 +5,10 @@ import org.bouncycastle.crypto.engines.AESEngine; import org.bouncycastle.crypto.generators.HKDFBytesGenerator; import org.bouncycastle.crypto.generators.X25519KeyPairGenerator; +import org.bouncycastle.crypto.macs.HMac; import org.bouncycastle.crypto.modes.GCMBlockCipher; import org.bouncycastle.crypto.params.AEADParameters; +import org.bouncycastle.crypto.params.HKDFParameters; import org.bouncycastle.crypto.params.KeyParameter; import org.bouncycastle.crypto.params.X25519KeyGenerationParameters; import org.bouncycastle.crypto.params.X25519PublicKeyParameters; @@ -14,15 +16,19 @@ import org.bouncycastle.crypto.digests.SHA256Digest; import org.bouncycastle.crypto.AsymmetricCipherKeyPair; +import java.nio.charset.StandardCharsets; import java.security.SecureRandom; import java.util.Arrays; /** - * HPKE seal (RFC 9180) compatible with Go: KEM_X25519_HKDF_SHA256, KDF_HKDF_SHA256, AEAD_AES256GCM. + * HPKE seal (RFC 9180) compatible with Go key-agent: KEM_X25519_HKDF_SHA256, KDF_HKDF_SHA256, AEAD_AES256GCM. * Seal: encrypt wrapper DEK with host public key; output = enc (32) || ciphertext (for agent to open with private key). + * Must use the same HPKE "info" as key-agent (cmd/key-agent: info := []byte("key-agent hpke info")) for key schedule. 
*/ public final class HostSecretEnvelopeCrypto { private static final String HPKE_V1 = "HPKE-v1"; + /** HPKE application info; must match key-agent main.go: info := []byte("key-agent hpke info") */ + private static final byte[] HPKE_INFO = "key-agent hpke info".getBytes(StandardCharsets.UTF_8); private static final byte[] KEM_ID = new byte[]{0x00, 0x20}; // X25519 HKDF-SHA256 private static final byte[] KDF_ID = new byte[]{0x00, 0x01}; // HKDF-SHA256 private static final byte[] AEAD_ID = new byte[]{0x00, 0x02}; // AES-256-GCM @@ -62,17 +68,30 @@ private static byte[] labeledExpand(byte[] prk, String label, byte[] info, int L return hkdfExpand(prk, labeledInfo, L, digest); } + /** + * RFC 5869 / RFC 9180 HKDF-Extract: returns PRK = HMAC-Hash(salt, IKM). + * Must not use HKDFBytesGenerator with full init (that does Extract+Expand); Bouncy Castle + * would then return Expand(PRK, "", L) instead of PRK. We implement Extract only via HMAC. + */ private static byte[] hkdfExtract(byte[] salt, byte[] ikm, Digest digest) { - HKDFBytesGenerator gen = new HKDFBytesGenerator(digest); - gen.init(new org.bouncycastle.crypto.params.HKDFParameters(ikm, salt != null ? salt : new byte[NH], null)); - byte[] out = new byte[NH]; - gen.generateBytes(out, 0, out.length); - return out; + byte[] saltBytes = (salt != null && salt.length > 0) ? salt : new byte[NH]; + HMac hmac = new HMac(digest); + hmac.init(new KeyParameter(saltBytes)); + hmac.update(ikm, 0, ikm.length); + byte[] prk = new byte[NH]; + hmac.doFinal(prk, 0); + return prk; } + /** + * RFC 5869 / RFC 9180 HKDF-Expand: OKM = HKDF-Expand(PRK, info, L). + * Must use skipExtractParameters(prk, info) so that Bouncy Castle uses prk as PRK and + * only performs Expand. Using HKDFParameters(prk, null, info) would make BC do + * Extract(null, prk) then Expand, which is wrong. 
+ */ private static byte[] hkdfExpand(byte[] prk, byte[] info, int L, Digest digest) { HKDFBytesGenerator gen = new HKDFBytesGenerator(digest); - gen.init(new org.bouncycastle.crypto.params.HKDFParameters(prk, null, info)); + gen.init(HKDFParameters.skipExtractParameters(prk, info != null ? info : new byte[0])); byte[] out = new byte[L]; gen.generateBytes(out, 0, out.length); return out; @@ -108,9 +127,9 @@ public static byte[] seal(byte[] recipientPublicKey, byte[] plaintext) throws In byte[] eaePrk = labeledExtract(new byte[0], "eae_prk", sharedSecret, digest, KEM_SUITE_ID); byte[] kemSharedSecret = labeledExpand(eaePrk, "shared_secret", kemContext, NH, digest, KEM_SUITE_ID); - // 4. Key schedule (base mode, empty psk, empty info) with HPKE suite_id + // 4. Key schedule (base mode, empty psk; info must match key-agent NewReceiver(sk, info)) byte[] pskIdHash = labeledExtract(new byte[0], "psk_id_hash", new byte[0], digest, null); - byte[] infoHash = labeledExtract(new byte[0], "info_hash", new byte[0], digest, null); + byte[] infoHash = labeledExtract(new byte[0], "info_hash", HPKE_INFO, digest, null); byte[] keyScheduleContext = concat(new byte[]{0x00}, concat(pskIdHash, infoHash)); byte[] secret = labeledExtract(kemSharedSecret, "secret", new byte[0], digest, null); byte[] key = labeledExpand(secret, "key", keyScheduleContext, NK, digest, null); diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 1e841b7d2d1..45b3c91ba74 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5507,46 +5507,6 @@ private void handle(SecretHostDefineMsg msg) { bus.reply(msg, reply); return; } - if (isRotateNeededGetError(rsp.getErrorCode())) { - String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); - KVMAgentCommands.RotatePublicKeyResponse rotateRsp = restf.syncJsonPost(rotateUrl, - new 
KVMAgentCommands.RotatePublicKeyCmd(), KVMAgentCommands.RotatePublicKeyResponse.class); - if (!rotateRsp.isSuccess()) { - reply.setError(operr("ensure secret failed, rotate key then retry failed: %s", rotateRsp.getError())); - bus.reply(msg, reply); - return; - } - String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); - KVMAgentCommands.GetPublicKeyResponse getRsp = restf.syncJsonPost(getUrl, - new KVMAgentCommands.GetPublicKeyCmd(), KVMAgentCommands.GetPublicKeyResponse.class); - if (!getRsp.isSuccess() || StringUtils.isBlank(getRsp.getPublicKey())) { - reply.setError(operr("ensure secret failed, rotate then get public key failed: %s", - getRsp != null ? getRsp.getError() : "null")); - bus.reply(msg, reply); - return; - } - saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); - String newPubKey = getRsp.getPublicKey().trim(); - byte[] newPubKeyBytes = java.util.Base64.getDecoder().decode(newPubKey); - byte[] newEnvelope; - try { - newEnvelope = HostSecretEnvelopeCrypto.seal(newPubKeyBytes, dekRaw); - } catch (org.bouncycastle.crypto.InvalidCipherTextException e) { - reply.setError(operr("ensure secret failed after rotate, HPKE seal failed: %s", e.getMessage())); - bus.reply(msg, reply); - return; - } - String newEnvelopeDekBase64 = java.util.Base64.getEncoder().encodeToString(newEnvelope); - cmd.setEncryptedDek(newEnvelopeDekBase64); - rsp = restf.syncJsonPost(url, cmd, KVMAgentCommands.SecretHostDefineResponse.class); - if (rsp.isSuccess()) { - if (rsp.getSecretUuid() != null) { - reply.setSecretUuid(rsp.getSecretUuid()); - } - bus.reply(msg, reply); - return; - } - } reply.setError(operr(rsp.getError())); if (rsp.getErrorCode() != null) { reply.setErrorCode(rsp.getErrorCode()); From be7aaad1a0f0548f1b9477fa0941f215e1f1472b Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 17:42:15 +0800 Subject: [PATCH 49/76] Envelope public key async call kvm agent --- .../src/main/java/org/zstack/kvm/KVMHost.java | 199 
+++++++++++++----- 1 file changed, 144 insertions(+), 55 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 45b3c91ba74..02f6f6e0246 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5317,8 +5317,17 @@ public boolean skip(Map data) { @Override public void run(FlowTrigger trigger, Map data) { - syncEnvelopeKeyAfterPing(); - trigger.next(); + syncEnvelopeKeyAfterPing(new Completion(trigger) { + @Override + public void success() { + trigger.next(); + } + + @Override + public void fail(ErrorCode errCode) { + trigger.next(); + } + }); } }); @@ -5339,52 +5348,123 @@ public void handle(ErrorCode errCode, Map data) { }).start(); } - private void syncEnvelopeKeyAfterPing() { + private static final long ENVELOPE_KEY_HTTP_TIMEOUT_SEC = 5L; + + private void syncEnvelopeKeyAfterPing(Completion completion) { KVMHostVO kvo = dbf.reload(getSelf()); - String hostUuid = kvo.getUuid(); + final String hostUuid = kvo.getUuid(); try { HostKeyIdentityVO identity = getHostKeyIdentity(hostUuid); if (identity != null && StringUtils.isNotBlank(identity.getPublicKey())) { String verifyUrl = buildUrl(KVMConstant.KVM_VERIFY_ENVELOPE_KEY_PATH); - KVMAgentCommands.VerifyPublicKeyResponse vrsp = restf.syncJsonPost(verifyUrl, - new KVMAgentCommands.VerifyPublicKeyCmd(), KVMAgentCommands.VerifyPublicKeyResponse.class); - if (vrsp != null && vrsp.isSuccess()) { - setHostKeyIdentityVerified(hostUuid, true); - return; - } - if (vrsp != null && !vrsp.isSuccess() && isRotateNeededGetError(vrsp.getErrorCode())) { - String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); - KVMAgentCommands.RotatePublicKeyResponse rotateRsp = restf.syncJsonPost(rotateUrl, - new KVMAgentCommands.RotatePublicKeyCmd(), KVMAgentCommands.RotatePublicKeyResponse.class); - if (rotateRsp.isSuccess()) { - String getUrl = 
buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); - KVMAgentCommands.GetPublicKeyResponse getRsp = restf.syncJsonPost(getUrl, - new KVMAgentCommands.GetPublicKeyCmd(), KVMAgentCommands.GetPublicKeyResponse.class); - if (getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { - saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); - return; - } - } else { - logger.warn("verify failed then rotate key on agent failed for host " + hostUuid + ": " + rotateRsp.getError()); - } - } - setHostKeyIdentityVerified(hostUuid, false); + restf.asyncJsonPost(verifyUrl, new KVMAgentCommands.VerifyPublicKeyCmd(), + new JsonAsyncRESTCallback(completion) { + @Override + public void fail(ErrorCode err) { + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + } + + @Override + public void success(KVMAgentCommands.VerifyPublicKeyResponse vrsp) { + if (vrsp != null && vrsp.isSuccess()) { + setHostKeyIdentityVerified(hostUuid, true); + completion.success(); + return; + } + if (vrsp != null && !vrsp.isSuccess() && isRotateNeededGetError(vrsp.getErrorCode())) { + String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); + restf.asyncJsonPost(rotateUrl, new KVMAgentCommands.RotatePublicKeyCmd(), + new JsonAsyncRESTCallback(completion) { + @Override + public void fail(ErrorCode err) { + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + } + + @Override + public void success(KVMAgentCommands.RotatePublicKeyResponse rotateRsp) { + if (rotateRsp == null || !rotateRsp.isSuccess()) { + logger.warn("verify failed then rotate key on agent failed for host " + hostUuid + ": " + (rotateRsp != null ? 
rotateRsp.getError() : "null")); + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + return; + } + String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); + restf.asyncJsonPost(getUrl, new KVMAgentCommands.GetPublicKeyCmd(), + new JsonAsyncRESTCallback(completion) { + @Override + public void fail(ErrorCode err) { + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + } + + @Override + public void success(KVMAgentCommands.GetPublicKeyResponse getRsp) { + if (getRsp != null && getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { + saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); + } else { + setHostKeyIdentityVerified(hostUuid, false); + } + completion.success(); + } + + @Override + public Class getReturnClass() { + return KVMAgentCommands.GetPublicKeyResponse.class; + } + }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); + } + + @Override + public Class getReturnClass() { + return KVMAgentCommands.RotatePublicKeyResponse.class; + } + }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); + return; + } + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + } + + @Override + public Class getReturnClass() { + return KVMAgentCommands.VerifyPublicKeyResponse.class; + } + }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); return; } String createUrl = buildUrl(KVMConstant.KVM_CREATE_ENVELOPE_KEY_PATH); - KVMAgentCommands.CreatePublicKeyResponse createRsp = restf.syncJsonPost(createUrl, - new KVMAgentCommands.CreatePublicKeyCmd(), KVMAgentCommands.CreatePublicKeyResponse.class); - if (!createRsp.isSuccess()) { - logger.warn("create key on agent failed for host " + hostUuid + ": " + createRsp.getError()); - setHostKeyIdentityVerified(hostUuid, false); - return; - } + restf.asyncJsonPost(createUrl, new KVMAgentCommands.CreatePublicKeyCmd(), + new JsonAsyncRESTCallback(completion) { + @Override + public void fail(ErrorCode err) { + logger.warn("create key on agent 
failed for host " + hostUuid + ": " + (err != null ? err.getDetails() : "")); + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + } + + @Override + public void success(KVMAgentCommands.CreatePublicKeyResponse createRsp) { + if (createRsp == null || !createRsp.isSuccess()) { + logger.warn("create key on agent failed for host " + hostUuid + ": " + (createRsp != null ? createRsp.getError() : "null")); + setHostKeyIdentityVerified(hostUuid, false); + } + completion.success(); + } + + @Override + public Class getReturnClass() { + return KVMAgentCommands.CreatePublicKeyResponse.class; + } + }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); } catch (Exception e) { logger.warn("sync secret key after connect failed for host " + hostUuid + ": " + e.getMessage()); try { setHostKeyIdentityVerified(hostUuid, false); } catch (Exception ignored) { } + completion.success(); } } @@ -5491,31 +5571,40 @@ private void handle(SecretHostDefineMsg msg) { return; } String envelopeDekBase64 = java.util.Base64.getEncoder().encodeToString(envelope); - try { - String url = buildUrl(KVMConstant.KVM_ENSURE_SECRET_PATH); - KVMAgentCommands.SecretHostDefineCmd cmd = new KVMAgentCommands.SecretHostDefineCmd(); - cmd.setEncryptedDek(envelopeDekBase64); - cmd.setVmUuid(msg.getVmUuid()); - cmd.setPurpose(msg.getPurpose()); - cmd.setProviderName(msg.getProviderName()); - cmd.setDescription(msg.getDescription() != null ? 
msg.getDescription() : ""); - KVMAgentCommands.SecretHostDefineResponse rsp = restf.syncJsonPost(url, cmd, KVMAgentCommands.SecretHostDefineResponse.class); - if (rsp.isSuccess()) { - if (rsp.getSecretUuid() != null) { - reply.setSecretUuid(rsp.getSecretUuid()); + String url = buildUrl(KVMConstant.KVM_ENSURE_SECRET_PATH); + KVMAgentCommands.SecretHostDefineCmd cmd = new KVMAgentCommands.SecretHostDefineCmd(); + cmd.setEncryptedDek(envelopeDekBase64); + cmd.setVmUuid(msg.getVmUuid()); + cmd.setPurpose(msg.getPurpose()); + cmd.setProviderName(msg.getProviderName()); + cmd.setDescription(msg.getDescription() != null ? msg.getDescription() : ""); + restf.asyncJsonPost(url, cmd, new JsonAsyncRESTCallback(msg, reply) { + @Override + public void fail(ErrorCode err) { + reply.setError(err != null ? err : operr("ensure secret on agent failed")); + bus.reply(msg, reply); + } + + @Override + public void success(KVMAgentCommands.SecretHostDefineResponse rsp) { + if (rsp != null && rsp.isSuccess()) { + if (rsp.getSecretUuid() != null) { + reply.setSecretUuid(rsp.getSecretUuid()); + } + } else { + reply.setError(operr(rsp != null ? 
rsp.getError() : "ensure secret failed")); + if (rsp != null && rsp.getErrorCode() != null) { + reply.setErrorCode(rsp.getErrorCode()); + } } bus.reply(msg, reply); - return; } - reply.setError(operr(rsp.getError())); - if (rsp.getErrorCode() != null) { - reply.setErrorCode(rsp.getErrorCode()); + + @Override + public Class getReturnClass() { + return KVMAgentCommands.SecretHostDefineResponse.class; } - bus.reply(msg, reply); - } catch (RestClientException e) { - reply.setError(operr("ensure secret on agent failed: %s", e.getMessage())); - bus.reply(msg, reply); - } + }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); } @Override From 225ffe0ea663d627c70da121f5b7b9fb188e1825 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Fri, 6 Mar 2026 18:20:25 +0800 Subject: [PATCH 50/76] Limit dek length when define libvirt secret --- plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 02f6f6e0246..a7dacc8e08e 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5509,6 +5509,8 @@ private void saveOrUpdateHostKeyIdentity(String hostUuid, String publicKey, bool dbf.update(vo); } + private static final int MAX_DEK_BYTES = 1024; + private void handle(SecretHostDefineMsg msg) { SecretHostDefineReply reply = new SecretHostDefineReply(); if (org.apache.commons.lang.StringUtils.isBlank(msg.getDekBase64())) { @@ -5549,6 +5551,12 @@ private void handle(SecretHostDefineMsg msg) { return; } + if (dekRaw.length > MAX_DEK_BYTES) { + reply.setError(operr("dekBase64 decoded payload is too large")); + bus.reply(msg, reply); + return; + } + byte[] pubKeyBytes; try { pubKeyBytes = java.util.Base64.getDecoder().decode(pubKey); From a34f8c124531d34ab09fa5223ea75a845fb7a573 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Sat, 7 Mar 2026 19:57:13 
+0800 Subject: [PATCH 51/76] Hide dek in log --- .../main/java/org/zstack/header/secret/SecretHostDefineMsg.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java index 4305af89491..c7280743dc0 100644 --- a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java +++ b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java @@ -1,6 +1,7 @@ package org.zstack.header.secret; import org.zstack.header.host.HostMessage; +import org.zstack.header.log.NoLogging; import org.zstack.header.message.NeedReplyMessage; /** @@ -10,6 +11,7 @@ */ public class SecretHostDefineMsg extends NeedReplyMessage implements HostMessage { private String hostUuid; + @NoLogging(type = NoLogging.Type.Simple) private String dekBase64; private String vmUuid; private String purpose; From fc9e8207e3189e3d8792185bf5b101aefd94eaa3 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Sat, 7 Mar 2026 20:02:09 +0800 Subject: [PATCH 52/76] Remove test point in stopVm --- .../src/main/java/org/zstack/kvm/KVMHost.java | 20 ------------------- 1 file changed, 20 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index a7dacc8e08e..88d923de0b0 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -3865,26 +3865,6 @@ protected void stopVm(final StopVmOnHypervisorMsg msg, final NoErrorCompletion c checkStatus(); final VmInstanceInventory vminv = msg.getVmInventory(); - { - String dekBase64 = "dGVzdERFSw=="; - SecretHostDefineMsg defineMsg = new SecretHostDefineMsg(); - defineMsg.setHostUuid(getSelf().getUuid()); - defineMsg.setDekBase64(dekBase64); - defineMsg.setVmUuid(vminv.getUuid()); - defineMsg.setPurpose("vm"); - defineMsg.setProviderName("zstack"); - 
bus.makeTargetServiceIdByResourceUuid(defineMsg, HostConstant.SERVICE_ID, getSelf().getUuid()); - MessageReply defineReply = bus.call(defineMsg); - if (!defineReply.isSuccess()) { - logger.warn(String.format("debug SecretDefine before stop vm[uuid:%s] failed: %s", vminv.getUuid(), defineReply.getError())); - } else { - SecretHostDefineReply srep = defineReply.castReply(); - if (srep != null && srep.getSecretUuid() != null) { - logger.info(String.format("debug SecretDefine before stop vm[uuid:%s] success, secretUuid=%s", vminv.getUuid(), srep.getSecretUuid())); - } - } - } - StopVmCmd cmd = new StopVmCmd(); cmd.setUuid(vminv.getUuid()); cmd.setType(msg.getType()); From 83121e1333b3986c7992c6245f92452d4a042c7b Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Sun, 8 Mar 2026 16:58:05 +0800 Subject: [PATCH 53/76] Improve HostSecretCase test --- .../kvm/host/HostSecretCase.groovy | 77 +++++++++++++++++-- 1 file changed, 70 insertions(+), 7 deletions(-) diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy index 86708eba7d4..0c00ae5f2c4 100644 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy @@ -2,21 +2,28 @@ package org.zstack.test.integration.kvm.host import org.zstack.core.Platform import org.zstack.core.cloudbus.CloudBus +import org.zstack.core.db.DatabaseFacade import org.zstack.header.host.AddHostReply import org.zstack.header.host.HostConstant import org.zstack.header.host.HostInventory +import org.zstack.header.host.HostKeyIdentityVO import org.zstack.header.host.HostStatus +import org.zstack.header.host.PingHostMsg +import org.zstack.header.host.PingHostReply import org.zstack.header.message.MessageReply import org.zstack.kvm.AddKVMHostMsg import org.zstack.kvm.KVMConstant import org.zstack.kvm.KVMAgentCommands import 
org.zstack.storage.primary.local.LocalStorageKvmBackend import org.zstack.test.integration.kvm.KvmTest +import org.springframework.http.HttpEntity import org.zstack.testlib.EnvSpec import org.zstack.testlib.SubCase import org.zstack.header.secret.SecretHostDefineMsg import org.zstack.header.secret.SecretHostDefineReply +import java.util.concurrent.atomic.AtomicInteger + /** * Integration test for host secret: create/get/rotate/verify public key on connect, * and SecretHostDefine (ensure secret on agent). @@ -28,6 +35,10 @@ class HostSecretCase extends SubCase { CloudBus bus HostInventory addedHost + /** Counters for simulator call assertions (async secret sync / ensureSecret). */ + AtomicInteger createEnvelopeKeyCallCount + AtomicInteger ensureSecretCallCount + /** 32-byte X25519 public key (base64) for simulator; must be valid for HPKE seal. */ static final String MOCK_PUBLIC_KEY_BASE64 = "AQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyA=" @@ -62,6 +73,9 @@ class HostSecretCase extends SubCase { } void registerSecretSimulators() { + createEnvelopeKeyCallCount = new AtomicInteger(0) + ensureSecretCallCount = new AtomicInteger(0) + env.simulator(KVMConstant.KVM_CONNECT_PATH) { def rsp = new KVMAgentCommands.ConnectResponse() rsp.success = true @@ -69,15 +83,36 @@ class HostSecretCase extends SubCase { rsp.qemuVersion = "1.3.0" return rsp } - env.simulator(KVMConstant.KVM_HOST_FACT_PATH) { - def rsp = new KVMAgentCommands.HostFactResponse() - rsp.osDistribution = "CentOS" - rsp.osVersion = "7.0" + // Use afterSimulator like AddHostCase: rely on testlib default HostFactResponse, only set what this test needs. 
+ env.afterSimulator(KVMConstant.KVM_HOST_FACT_PATH) { KVMAgentCommands.HostFactResponse rsp -> + rsp.hvmCpuFlag = "vmx" // default is ""; connect needs vmx/svm to pass checkVirtualizationEnabled + return rsp + } + env.simulator(LocalStorageKvmBackend.INIT_PATH) { HttpEntity e -> + def rsp = new LocalStorageKvmBackend.InitRsp() + rsp.success = true + rsp.localStorageUsedCapacity = 0L + rsp.totalCapacity = 0L + rsp.availableCapacity = 0L + return rsp + } + + // Ping simulator so we can trigger pingHook (which runs sync-envelope-public-key -> KVM_CREATE_ENVELOPE_KEY_PATH). + // needReconnectHost() is true when rsp.version != dbf.getDbVersion(), which sets KVM_HOST_SKIP_PING_NO_FAILURE_EXTENSIONS + // and skips sync-envelope-public-key; so we must return the actual DB version. + def dbVersion = bean(DatabaseFacade.class).getDbVersion() + env.simulator(KVMConstant.KVM_PING_PATH) { HttpEntity e -> + def cmd = org.zstack.utils.gson.JSONObjectUtil.toObject(e.body, KVMAgentCommands.PingCmd.class) + def rsp = new KVMAgentCommands.PingResponse() + rsp.success = true + rsp.hostUuid = cmd.hostUuid + rsp.version = dbVersion + rsp.sendCommandUrl = "http://127.0.0.2:7272" return rsp } - env.simulator(LocalStorageKvmBackend.INIT_PATH) { rsp, _ -> return rsp } env.simulator(KVMConstant.KVM_CREATE_ENVELOPE_KEY_PATH) { + createEnvelopeKeyCallCount?.incrementAndGet() return new KVMAgentCommands.CreatePublicKeyResponse() } env.simulator(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH) { @@ -92,6 +127,7 @@ class HostSecretCase extends SubCase { return new KVMAgentCommands.RotatePublicKeyResponse() } env.simulator(KVMConstant.KVM_ENSURE_SECRET_PATH) { + ensureSecretCallCount?.incrementAndGet() def rsp = new KVMAgentCommands.SecretHostDefineResponse() rsp.secretUuid = Platform.uuid return rsp @@ -113,17 +149,41 @@ class HostSecretCase extends SubCase { bus.makeLocalServiceId(amsg, HostConstant.SERVICE_ID) AddHostReply reply = (AddHostReply) bus.call(amsg) assert reply != null - assert 
reply.isSuccess() + assert reply.isSuccess() : "AddHost failed: ${reply.error?.toString() ?: 'no error'}" assert reply.inventory.status == HostStatus.Connected.toString() addedHost = reply.inventory + + // Envelope key sync runs inside pingHook, not during connect. Trigger a ping so that + // sync-envelope-public-key runs and KVM_CREATE_ENVELOPE_KEY_PATH is invoked. + PingHostMsg pingMsg = new PingHostMsg() + pingMsg.hostUuid = addedHost.uuid + bus.makeTargetServiceIdByResourceUuid(pingMsg, HostConstant.SERVICE_ID, addedHost.uuid) + MessageReply pingReply = bus.call(pingMsg) + assert pingReply.isSuccess() : "PingHost failed: ${pingReply.error}" + + assert createEnvelopeKeyCallCount.get() >= 1 : "envelope key sync (KVM_CREATE_ENVELOPE_KEY_PATH) should be triggered at least once after add host" + + // Create/ping only calls createEnvelopeKey; production does not GET and save the key after create. + // Persist HostKeyIdentity so SecretHostDefineMsg finds a public key (same value as KVM_GET_ENVELOPE_KEY_PATH simulator). 
+ HostKeyIdentityVO keyVo = new HostKeyIdentityVO() + keyVo.hostUuid = addedHost.uuid + keyVo.publicKey = MOCK_PUBLIC_KEY_BASE64 + keyVo.fingerprint = "" + keyVo.verified = true + bean(DatabaseFacade.class).persist(keyVo) } void testSecretHostDefineSuccess() { assert addedHost != null + int countBefore = ensureSecretCallCount.get() + SecretHostDefineMsg msg = new SecretHostDefineMsg() msg.hostUuid = addedHost.uuid - msg.dekBase64 = "dGVzdERFSw==" // base64 of "testDEK" + msg.dekBase64 = "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8=" + msg.vmUuid = Platform.uuid + msg.purpose = "test-vtpm" + msg.providerName = "vtpm" bus.makeTargetServiceIdByResourceUuid(msg, HostConstant.SERVICE_ID, addedHost.uuid) MessageReply reply = bus.call(msg) @@ -131,6 +191,9 @@ class HostSecretCase extends SubCase { assert reply.isSuccess() SecretHostDefineReply defineReply = reply.castReply() assert defineReply.secretUuid != null + + // Ensure KVM_ENSURE_SECRET_PATH was actually called (asyncJsonPost to agent). 
+ assert ensureSecretCallCount.get() == countBefore + 1 : "KVM_ENSURE_SECRET_PATH simulator should be called exactly once for SecretHostDefineMsg" } void testSecretHostDefineFailWhenNoDek() { From 6e30df609be993c560400a942e8e49f0bd2094ee Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Sun, 8 Mar 2026 18:12:25 +0800 Subject: [PATCH 54/76] Compute fingerprint of public key --- .../src/main/java/org/zstack/kvm/KVMHost.java | 178 +++++++++++++----- 1 file changed, 128 insertions(+), 50 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 88d923de0b0..6a4a2b0d324 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -101,6 +101,9 @@ import javax.persistence.TypedQuery; import java.io.IOException; import java.net.URI; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Base64; import java.net.URISyntaxException; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; @@ -5330,6 +5333,57 @@ public void handle(ErrorCode errCode, Map data) { private static final long ENVELOPE_KEY_HTTP_TIMEOUT_SEC = 5L; + private void doRotateAndGetThenSave(String hostUuid, Completion completion) { + String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); + restf.asyncJsonPost(rotateUrl, new KVMAgentCommands.RotatePublicKeyCmd(), + new JsonAsyncRESTCallback(completion) { + @Override + public void fail(ErrorCode err) { + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + } + + @Override + public void success(KVMAgentCommands.RotatePublicKeyResponse rotateRsp) { + if (rotateRsp == null || !rotateRsp.isSuccess()) { + logger.warn("rotate key on agent failed for host " + hostUuid + ": " + (rotateRsp != null ? 
rotateRsp.getError() : "null")); + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + return; + } + String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); + restf.asyncJsonPost(getUrl, new KVMAgentCommands.GetPublicKeyCmd(), + new JsonAsyncRESTCallback(completion) { + @Override + public void fail(ErrorCode err) { + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + } + + @Override + public void success(KVMAgentCommands.GetPublicKeyResponse getRsp) { + if (getRsp != null && getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { + saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); + } else { + setHostKeyIdentityVerified(hostUuid, false); + } + completion.success(); + } + + @Override + public Class getReturnClass() { + return KVMAgentCommands.GetPublicKeyResponse.class; + } + }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); + } + + @Override + public Class getReturnClass() { + return KVMAgentCommands.RotatePublicKeyResponse.class; + } + }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); + } + private void syncEnvelopeKeyAfterPing(Completion completion) { KVMHostVO kvo = dbf.reload(getSelf()); final String hostUuid = kvo.getUuid(); @@ -5348,59 +5402,21 @@ public void fail(ErrorCode err) { @Override public void success(KVMAgentCommands.VerifyPublicKeyResponse vrsp) { if (vrsp != null && vrsp.isSuccess()) { + String storedFp = identity.getFingerprint(); + if (StringUtils.isNotBlank(storedFp)) { + String computed = fingerprintFromPublicKey(identity.getPublicKey()); + if (!storedFp.equals(computed)) { + logger.warn("host " + hostUuid + " verify ok but fingerprint mismatch, rotating and re-getting key"); + doRotateAndGetThenSave(hostUuid, completion); + return; + } + } setHostKeyIdentityVerified(hostUuid, true); completion.success(); return; } if (vrsp != null && !vrsp.isSuccess() && isRotateNeededGetError(vrsp.getErrorCode())) { - String rotateUrl = 
buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); - restf.asyncJsonPost(rotateUrl, new KVMAgentCommands.RotatePublicKeyCmd(), - new JsonAsyncRESTCallback(completion) { - @Override - public void fail(ErrorCode err) { - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - } - - @Override - public void success(KVMAgentCommands.RotatePublicKeyResponse rotateRsp) { - if (rotateRsp == null || !rotateRsp.isSuccess()) { - logger.warn("verify failed then rotate key on agent failed for host " + hostUuid + ": " + (rotateRsp != null ? rotateRsp.getError() : "null")); - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - return; - } - String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); - restf.asyncJsonPost(getUrl, new KVMAgentCommands.GetPublicKeyCmd(), - new JsonAsyncRESTCallback(completion) { - @Override - public void fail(ErrorCode err) { - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - } - - @Override - public void success(KVMAgentCommands.GetPublicKeyResponse getRsp) { - if (getRsp != null && getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { - saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); - } else { - setHostKeyIdentityVerified(hostUuid, false); - } - completion.success(); - } - - @Override - public Class getReturnClass() { - return KVMAgentCommands.GetPublicKeyResponse.class; - } - }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); - } - - @Override - public Class getReturnClass() { - return KVMAgentCommands.RotatePublicKeyResponse.class; - } - }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); + doRotateAndGetThenSave(hostUuid, completion); return; } setHostKeyIdentityVerified(hostUuid, false); @@ -5429,8 +5445,34 @@ public void success(KVMAgentCommands.CreatePublicKeyResponse createRsp) { if (createRsp == null || !createRsp.isSuccess()) { logger.warn("create key on agent failed for host " + hostUuid + ": " + (createRsp != null ? 
createRsp.getError() : "null")); setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + return; } - completion.success(); + String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); + restf.asyncJsonPost(getUrl, new KVMAgentCommands.GetPublicKeyCmd(), + new JsonAsyncRESTCallback(completion) { + @Override + public void fail(ErrorCode err) { + logger.warn("get public key after create failed for host " + hostUuid + ": " + (err != null ? err.getDetails() : "")); + setHostKeyIdentityVerified(hostUuid, false); + completion.success(); + } + + @Override + public void success(KVMAgentCommands.GetPublicKeyResponse getRsp) { + if (getRsp != null && getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { + saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); + } else { + setHostKeyIdentityVerified(hostUuid, false); + } + completion.success(); + } + + @Override + public Class getReturnClass() { + return KVMAgentCommands.GetPublicKeyResponse.class; + } + }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); } @Override @@ -5468,23 +5510,50 @@ private HostKeyIdentityVO getHostKeyIdentity(String hostUuid) { return q.find(); } + /** + * Compute fingerprint from public key (base64): SHA-256 of decoded key bytes, hex-encoded. + * Returns empty string if key is invalid or hashing fails. 
+ */ + private static String fingerprintFromPublicKey(String publicKeyBase64) { + if (publicKeyBase64 == null || publicKeyBase64.isEmpty()) { + return ""; + } + try { + byte[] keyBytes = Base64.getDecoder().decode(publicKeyBase64.trim()); + if (keyBytes == null || keyBytes.length == 0) { + return ""; + } + MessageDigest md = MessageDigest.getInstance("SHA-256"); + byte[] hash = md.digest(keyBytes); + StringBuilder sb = new StringBuilder(hash.length * 2); + for (byte b : hash) { + sb.append(String.format("%02x", b & 0xff)); + } + return sb.toString(); + } catch (IllegalArgumentException | NoSuchAlgorithmException e) { + return ""; + } + } + private void saveOrUpdateHostKeyIdentity(String hostUuid, String publicKey, boolean verified) { if (StringUtils.isBlank(publicKey)) { return; } String keyToSave = publicKey.trim(); + String fingerprint = fingerprintFromPublicKey(keyToSave); HostKeyIdentityVO vo = getHostKeyIdentity(hostUuid); if (vo == null) { vo = new HostKeyIdentityVO(); vo.setHostUuid(hostUuid); vo.setPublicKey(keyToSave); - vo.setFingerprint(""); + vo.setFingerprint(fingerprint); vo.setVerified(verified); vo.setCreateDate(new java.sql.Timestamp(System.currentTimeMillis())); dbf.persist(vo); return; } vo.setPublicKey(keyToSave); + vo.setFingerprint(fingerprint); vo.setVerified(verified); dbf.update(vo); } @@ -5512,6 +5581,15 @@ private void handle(SecretHostDefineMsg msg) { bus.reply(msg, reply); return; } + String storedFingerprint = identity.getFingerprint(); + if (StringUtils.isNotBlank(storedFingerprint)) { + String computed = fingerprintFromPublicKey(pubKey); + if (!storedFingerprint.equals(computed)) { + reply.setError(operr("host public key fingerprint mismatch, key may be corrupted or tampered")); + bus.reply(msg, reply); + return; + } + } if (!Boolean.TRUE.equals(verifyOk)) { reply.setError(operr("host secret key verify not ok, not synced")); bus.reply(msg, reply); From f3dea23e5d1cfb9fa71472bbc7b116ddc9dd6960 Mon Sep 17 00:00:00 2001 From: 
"zhong.zhou" Date: Sun, 8 Mar 2026 18:28:12 +0800 Subject: [PATCH 55/76] Verify fingerprint in secret define --- .../src/main/java/org/zstack/kvm/KVMHost.java | 12 +++++------ .../kvm/host/HostSecretCase.groovy | 20 ++++++++++++++++++- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 6a4a2b0d324..bc4e9954b11 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5582,13 +5582,11 @@ private void handle(SecretHostDefineMsg msg) { return; } String storedFingerprint = identity.getFingerprint(); - if (StringUtils.isNotBlank(storedFingerprint)) { - String computed = fingerprintFromPublicKey(pubKey); - if (!storedFingerprint.equals(computed)) { - reply.setError(operr("host public key fingerprint mismatch, key may be corrupted or tampered")); - bus.reply(msg, reply); - return; - } + String computed = fingerprintFromPublicKey(pubKey); + if (!storedFingerprint.equals(computed)) { + reply.setError(operr("host public key fingerprint mismatch, key may be corrupted or tampered")); + bus.reply(msg, reply); + return; } if (!Boolean.TRUE.equals(verifyOk)) { reply.setError(operr("host secret key verify not ok, not synced")); diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy index 0c00ae5f2c4..361b9221232 100644 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy @@ -22,6 +22,7 @@ import org.zstack.testlib.SubCase import org.zstack.header.secret.SecretHostDefineMsg import org.zstack.header.secret.SecretHostDefineReply +import java.security.MessageDigest import java.util.concurrent.atomic.AtomicInteger /** @@ -42,6 +43,22 @@ class HostSecretCase 
extends SubCase { /** 32-byte X25519 public key (base64) for simulator; must be valid for HPKE seal. */ static final String MOCK_PUBLIC_KEY_BASE64 = "AQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyA=" + /** Same algorithm as KVMHost.fingerprintFromPublicKey: SHA-256(decoded base64) in hex. */ + static String fingerprintFromPublicKey(String publicKeyBase64) { + if (publicKeyBase64 == null || publicKeyBase64.isEmpty()) return "" + try { + byte[] keyBytes = java.util.Base64.getDecoder().decode(publicKeyBase64.trim()) + if (keyBytes == null || keyBytes.length == 0) return "" + MessageDigest md = MessageDigest.getInstance("SHA-256") + byte[] hash = md.digest(keyBytes) + StringBuilder sb = new StringBuilder(hash.length * 2) + for (byte b : hash) sb.append(String.format("%02x", b & 0xff)) + return sb.toString() + } catch (Exception e) { + return "" + } + } + @Override void setup() { useSpring(KvmTest.springSpec) @@ -165,10 +182,11 @@ class HostSecretCase extends SubCase { // Create/ping only calls createEnvelopeKey; production does not GET and save the key after create. // Persist HostKeyIdentity so SecretHostDefineMsg finds a public key (same value as KVM_GET_ENVELOPE_KEY_PATH simulator). + // Set fingerprint so the handle(SecretHostDefineMsg) fingerprint check is exercised (must match publicKey). 
HostKeyIdentityVO keyVo = new HostKeyIdentityVO() keyVo.hostUuid = addedHost.uuid keyVo.publicKey = MOCK_PUBLIC_KEY_BASE64 - keyVo.fingerprint = "" + keyVo.fingerprint = fingerprintFromPublicKey(MOCK_PUBLIC_KEY_BASE64) keyVo.verified = true bean(DatabaseFacade.class).persist(keyVo) } From 8a30f81a1e234fd71e14f75bf0f4d057f2c708ed Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Sun, 8 Mar 2026 18:33:14 +0800 Subject: [PATCH 56/76] Remove secret host get/create extension point --- .../secret/SecretCreateExtensionPoint.java | 19 ------------------- .../secret/SecretGetExtensionPoint.java | 19 ------------------- 2 files changed, 38 deletions(-) delete mode 100644 header/src/main/java/org/zstack/header/secret/SecretCreateExtensionPoint.java delete mode 100644 header/src/main/java/org/zstack/header/secret/SecretGetExtensionPoint.java diff --git a/header/src/main/java/org/zstack/header/secret/SecretCreateExtensionPoint.java b/header/src/main/java/org/zstack/header/secret/SecretCreateExtensionPoint.java deleted file mode 100644 index 7c8ab192ecd..00000000000 --- a/header/src/main/java/org/zstack/header/secret/SecretCreateExtensionPoint.java +++ /dev/null @@ -1,19 +0,0 @@ - -package org.zstack.header.secret; - -import org.zstack.header.core.ReturnValueCompletion; - -/** - * Extension point for creating a secret in key-manager (e.g. premium with NKP/KMS). - * Used for VM (e.g. vTPM at VM create). Premium implements with key-manager create; - * success returns secretId/name for later get. - */ -public interface SecretCreateExtensionPoint { - /** - * Create a secret. Implementation (e.g. premium) calls key-manager create. 
- * - * @param secretName name or identifier for the secret - * @param completion success(secretIdOrName) for later get, or fail(error) - */ - void createSecret(String secretName, ReturnValueCompletion completion); -} diff --git a/header/src/main/java/org/zstack/header/secret/SecretGetExtensionPoint.java b/header/src/main/java/org/zstack/header/secret/SecretGetExtensionPoint.java deleted file mode 100644 index 78b28283ab9..00000000000 --- a/header/src/main/java/org/zstack/header/secret/SecretGetExtensionPoint.java +++ /dev/null @@ -1,19 +0,0 @@ - -package org.zstack.header.secret; - -import org.zstack.header.core.ReturnValueCompletion; - -/** - * Extension point for getting plaintext DEK from key-manager (e.g. premium with NKP/KMS). - * Used for VM (e.g. to send DEK to host via SecretHostDefineMsg). Premium implements with - * key-manager get; success returns dekBase64 (plaintext DEK, base64). - */ -public interface SecretGetExtensionPoint { - /** - * Get plaintext DEK. Implementation (e.g. premium) calls key-manager get. 
- * - * @param secretNameOrId secret name or id (from create or stored) - * @param completion success(dekBase64) with plaintext DEK in base64, or fail(error) - */ - void getSecret(String secretNameOrId, ReturnValueCompletion completion); -} From 7014e1a2295d7dfc2f2d3495f0c4eb5167bf0152 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Mon, 9 Mar 2026 14:06:32 +0800 Subject: [PATCH 57/76] Fix primary key confict error --- .../kvm/host/HostSecretCase.groovy | 31 +++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy index 361b9221232..afcdd213a77 100644 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy @@ -3,10 +3,13 @@ package org.zstack.test.integration.kvm.host import org.zstack.core.Platform import org.zstack.core.cloudbus.CloudBus import org.zstack.core.db.DatabaseFacade +import org.zstack.core.db.SimpleQuery +import org.zstack.core.db.SimpleQuery.Op import org.zstack.header.host.AddHostReply import org.zstack.header.host.HostConstant import org.zstack.header.host.HostInventory import org.zstack.header.host.HostKeyIdentityVO +import org.zstack.header.host.HostKeyIdentityVO_ import org.zstack.header.host.HostStatus import org.zstack.header.host.PingHostMsg import org.zstack.header.host.PingHostReply @@ -180,15 +183,25 @@ class HostSecretCase extends SubCase { assert createEnvelopeKeyCallCount.get() >= 1 : "envelope key sync (KVM_CREATE_ENVELOPE_KEY_PATH) should be triggered at least once after add host" - // Create/ping only calls createEnvelopeKey; production does not GET and save the key after create. - // Persist HostKeyIdentity so SecretHostDefineMsg finds a public key (same value as KVM_GET_ENVELOPE_KEY_PATH simulator). 
- // Set fingerprint so the handle(SecretHostDefineMsg) fingerprint check is exercised (must match publicKey). - HostKeyIdentityVO keyVo = new HostKeyIdentityVO() - keyVo.hostUuid = addedHost.uuid - keyVo.publicKey = MOCK_PUBLIC_KEY_BASE64 - keyVo.fingerprint = fingerprintFromPublicKey(MOCK_PUBLIC_KEY_BASE64) - keyVo.verified = true - bean(DatabaseFacade.class).persist(keyVo) + // Create/ping may already persist HostKeyIdentityVO (sync path calls GET then saveOrUpdateHostKeyIdentity). + // Ensure HostKeyIdentity exists with expected key so SecretHostDefineMsg finds it and fingerprint check passes. + DatabaseFacade dbf = bean(DatabaseFacade.class) + SimpleQuery q = dbf.createQuery(HostKeyIdentityVO.class) + q.add(HostKeyIdentityVO_.hostUuid, Op.EQ, addedHost.uuid) + HostKeyIdentityVO keyVo = q.find() + if (keyVo == null) { + keyVo = new HostKeyIdentityVO() + keyVo.hostUuid = addedHost.uuid + keyVo.publicKey = MOCK_PUBLIC_KEY_BASE64 + keyVo.fingerprint = fingerprintFromPublicKey(MOCK_PUBLIC_KEY_BASE64) + keyVo.verified = true + dbf.persist(keyVo) + } else { + keyVo.publicKey = MOCK_PUBLIC_KEY_BASE64 + keyVo.fingerprint = fingerprintFromPublicKey(MOCK_PUBLIC_KEY_BASE64) + keyVo.verified = true + dbf.update(keyVo) + } } void testSecretHostDefineSuccess() { From ce6267ea87436b7731d44e794d9cfeb2f236c207 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Mon, 9 Mar 2026 17:54:44 +0800 Subject: [PATCH 58/76] Use UTF-8 instead HPKE default charset --- .../org/zstack/kvm/HostSecretEnvelopeCrypto.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java index acaca72d4f1..394bdc6933e 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java @@ -29,13 +29,17 @@ public final class HostSecretEnvelopeCrypto { 
private static final String HPKE_V1 = "HPKE-v1"; /** HPKE application info; must match key-agent main.go: info := []byte("key-agent hpke info") */ private static final byte[] HPKE_INFO = "key-agent hpke info".getBytes(StandardCharsets.UTF_8); - private static final byte[] KEM_ID = new byte[]{0x00, 0x20}; // X25519 HKDF-SHA256 + /** RFC 9180 / IANA HPKE: KEM_ID 0x0020 = DHKEM(X25519, HKDF-SHA256); Nenc = Npk = 32. */ + private static final byte[] KEM_ID = new byte[]{0x00, 0x20}; private static final byte[] KDF_ID = new byte[]{0x00, 0x01}; // HKDF-SHA256 private static final byte[] AEAD_ID = new byte[]{0x00, 0x02}; // AES-256-GCM - private static final byte[] KEM_SUITE_ID = concat("KEM".getBytes(), KEM_ID); // for DHKEM ExtractAndExpand - private static final byte[] SUITE_ID = concat(concat("HPKE".getBytes(), KEM_ID), concat(KDF_ID, AEAD_ID)); + private static final byte[] KEM_SUITE_ID = concat("KEM".getBytes(StandardCharsets.UTF_8), KEM_ID); + private static final byte[] SUITE_ID = concat(concat("HPKE".getBytes(StandardCharsets.UTF_8), KEM_ID), concat(KDF_ID, AEAD_ID)); + /** Nh: KDF output size (SHA-256 = 32). RFC 9180 Section 4. */ private static final int NH = 32; + /** Nk: AEAD key size (AES-256 = 32). RFC 9180 Section 7.3. */ private static final int NK = 32; + /** Nn: AEAD nonce size (AES-GCM = 12). RFC 9180 Section 7.3. */ private static final int NN = 12; private static byte[] concat(byte[] a, byte[] b) { @@ -57,14 +61,14 @@ private static byte[] i2osp(int n, int w) { /** RFC 9180 LabeledExtract(salt, label, ikm); use kemSuiteId for KEM layer, null for HPKE layer (uses SUITE_ID). */ private static byte[] labeledExtract(byte[] salt, String label, byte[] ikm, Digest digest, byte[] suiteId) { byte[] sid = suiteId != null ? suiteId : SUITE_ID; - byte[] labeledIkm = concat(concat(concat(HPKE_V1.getBytes(), sid), label.getBytes()), ikm != null ? 
ikm : new byte[0]); + byte[] labeledIkm = concat(concat(concat(HPKE_V1.getBytes(StandardCharsets.UTF_8), sid), label.getBytes(StandardCharsets.UTF_8)), ikm != null ? ikm : new byte[0]); return hkdfExtract(salt, labeledIkm, digest); } /** RFC 9180 LabeledExpand(prk, label, info, L); use kemSuiteId for KEM layer, null for HPKE layer. */ private static byte[] labeledExpand(byte[] prk, String label, byte[] info, int L, Digest digest, byte[] suiteId) { byte[] sid = suiteId != null ? suiteId : SUITE_ID; - byte[] labeledInfo = concat(concat(concat(concat(i2osp(L, 2), HPKE_V1.getBytes()), sid), label.getBytes()), info != null ? info : new byte[0]); + byte[] labeledInfo = concat(concat(concat(concat(i2osp(L, 2), HPKE_V1.getBytes(StandardCharsets.UTF_8)), sid), label.getBytes(StandardCharsets.UTF_8)), info != null ? info : new byte[0]); return hkdfExpand(prk, labeledInfo, L, digest); } From cbe58e969f46de168ba15e48b09826e4834ea49b Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Tue, 10 Mar 2026 14:06:15 +0800 Subject: [PATCH 59/76] Move some logic to premium * Move HPKE host secret envelope crypto to premium * Move logic of syncing public key to premium Resolves: ZSPHER-113 Related: http://dev.zstack.io:9080/zstackio/premium/-/merge_requests/13121 --- .../hostSecretEnvelopeCrypto.xml | 23 ++ conf/zstack.xml | 1 + .../zstack/header/host/HostKeyIdentityVO.java | 2 +- ...ostSecretEnvelopeCryptoExtensionPoint.java | 13 ++ .../header/secret/SecretHostDefineMsg.java | 2 +- .../header/secret/SecretHostDefineReply.java | 9 - plugin/kvm/pom.xml | 5 - .../zstack/kvm/HostSecretEnvelopeCrypto.java | 155 -------------- .../java/org/zstack/kvm/KVMAgentCommands.java | 2 +- .../src/main/java/org/zstack/kvm/KVMHost.java | 200 ++---------------- .../kvm/host/HostSecretCase.groovy | 21 +- ...tSecretEnvelopeCryptoExtensionPoint.groovy | 22 ++ ...SecretEnvelopeCryptoExtensionPointMock.xml | 15 ++ 13 files changed, 112 insertions(+), 358 deletions(-) create mode 100644 
conf/springConfigXml/hostSecretEnvelopeCrypto.xml create mode 100644 header/src/main/java/org/zstack/header/secret/HostSecretEnvelopeCryptoExtensionPoint.java delete mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java create mode 100644 test/src/test/groovy/org/zstack/test/integration/kvm/host/MockHostSecretEnvelopeCryptoExtensionPoint.groovy create mode 100644 test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoExtensionPointMock.xml diff --git a/conf/springConfigXml/hostSecretEnvelopeCrypto.xml b/conf/springConfigXml/hostSecretEnvelopeCrypto.xml new file mode 100644 index 00000000000..69d61dd397e --- /dev/null +++ b/conf/springConfigXml/hostSecretEnvelopeCrypto.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + diff --git a/conf/zstack.xml b/conf/zstack.xml index a4bd0a088b8..61d6ae737a8 100755 --- a/conf/zstack.xml +++ b/conf/zstack.xml @@ -58,6 +58,7 @@ + diff --git a/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java index e77873d70fe..1433e075954 100644 --- a/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java +++ b/header/src/main/java/org/zstack/header/host/HostKeyIdentityVO.java @@ -25,7 +25,7 @@ public class HostKeyIdentityVO { @Column private String fingerprint; - @Column(nullable = false) + @Column private Boolean verified = false; @Column diff --git a/header/src/main/java/org/zstack/header/secret/HostSecretEnvelopeCryptoExtensionPoint.java b/header/src/main/java/org/zstack/header/secret/HostSecretEnvelopeCryptoExtensionPoint.java new file mode 100644 index 00000000000..a166e3d1258 --- /dev/null +++ b/header/src/main/java/org/zstack/header/secret/HostSecretEnvelopeCryptoExtensionPoint.java @@ -0,0 +1,13 @@ +package org.zstack.header.secret; + +/** + * Extension point for sealing plaintext (e.g. DEK) with recipient's X25519 public key for host secret (e.g. vTPM). 
+ * Implementation is provided by premium/crypto when available; KVM uses PluginRegistry.getExtensionList() to get it. + */ +public interface HostSecretEnvelopeCryptoExtensionPoint { + /** + * Seal plaintext with recipient's X25519 public key (raw 32 bytes). + * @return envelope = enc (32 bytes) || ciphertext (AEAD output), compatible with key-agent open. + */ + byte[] seal(byte[] recipientPublicKey, byte[] plaintext) throws Exception; +} diff --git a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java index c7280743dc0..e8aeb6d9370 100644 --- a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java +++ b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java @@ -11,7 +11,7 @@ */ public class SecretHostDefineMsg extends NeedReplyMessage implements HostMessage { private String hostUuid; - @NoLogging(type = NoLogging.Type.Simple) + @NoLogging private String dekBase64; private String vmUuid; private String purpose; diff --git a/header/src/main/java/org/zstack/header/secret/SecretHostDefineReply.java b/header/src/main/java/org/zstack/header/secret/SecretHostDefineReply.java index 2149bbdb3ba..13ef13c07df 100644 --- a/header/src/main/java/org/zstack/header/secret/SecretHostDefineReply.java +++ b/header/src/main/java/org/zstack/header/secret/SecretHostDefineReply.java @@ -7,17 +7,8 @@ public class SecretHostDefineReply extends MessageReply { public static final String ERROR_CODE_KEYS_NOT_ON_DISK = "KEY_AGENT_KEYS_NOT_ON_DISK"; public static final String ERROR_CODE_KEY_FILES_INTEGRITY_MISMATCH = "KEY_AGENT_KEY_FILES_INTEGRITY_MISMATCH"; - private String errorCode; private String secretUuid; - public String getErrorCode() { - return errorCode; - } - - public void setErrorCode(String errorCode) { - this.errorCode = errorCode; - } - public String getSecretUuid() { return secretUuid; } diff --git a/plugin/kvm/pom.xml b/plugin/kvm/pom.xml index 
c0f484d0bc7..977ff26f496 100755 --- a/plugin/kvm/pom.xml +++ b/plugin/kvm/pom.xml @@ -9,11 +9,6 @@ kvm - - org.bouncycastle - bcprov-jdk15on - 1.67 - org.zstack compute diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java deleted file mode 100644 index 394bdc6933e..00000000000 --- a/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCrypto.java +++ /dev/null @@ -1,155 +0,0 @@ -package org.zstack.kvm; - -import org.bouncycastle.crypto.InvalidCipherTextException; -import org.bouncycastle.crypto.agreement.X25519Agreement; -import org.bouncycastle.crypto.engines.AESEngine; -import org.bouncycastle.crypto.generators.HKDFBytesGenerator; -import org.bouncycastle.crypto.generators.X25519KeyPairGenerator; -import org.bouncycastle.crypto.macs.HMac; -import org.bouncycastle.crypto.modes.GCMBlockCipher; -import org.bouncycastle.crypto.params.AEADParameters; -import org.bouncycastle.crypto.params.HKDFParameters; -import org.bouncycastle.crypto.params.KeyParameter; -import org.bouncycastle.crypto.params.X25519KeyGenerationParameters; -import org.bouncycastle.crypto.params.X25519PublicKeyParameters; -import org.bouncycastle.crypto.Digest; -import org.bouncycastle.crypto.digests.SHA256Digest; -import org.bouncycastle.crypto.AsymmetricCipherKeyPair; - -import java.nio.charset.StandardCharsets; -import java.security.SecureRandom; -import java.util.Arrays; - -/** - * HPKE seal (RFC 9180) compatible with Go key-agent: KEM_X25519_HKDF_SHA256, KDF_HKDF_SHA256, AEAD_AES256GCM. - * Seal: encrypt wrapper DEK with host public key; output = enc (32) || ciphertext (for agent to open with private key). - * Must use the same HPKE "info" as key-agent (cmd/key-agent: info := []byte("key-agent hpke info")) for key schedule. 
- */ -public final class HostSecretEnvelopeCrypto { - private static final String HPKE_V1 = "HPKE-v1"; - /** HPKE application info; must match key-agent main.go: info := []byte("key-agent hpke info") */ - private static final byte[] HPKE_INFO = "key-agent hpke info".getBytes(StandardCharsets.UTF_8); - /** RFC 9180 / IANA HPKE: KEM_ID 0x0020 = DHKEM(X25519, HKDF-SHA256); Nenc = Npk = 32. */ - private static final byte[] KEM_ID = new byte[]{0x00, 0x20}; - private static final byte[] KDF_ID = new byte[]{0x00, 0x01}; // HKDF-SHA256 - private static final byte[] AEAD_ID = new byte[]{0x00, 0x02}; // AES-256-GCM - private static final byte[] KEM_SUITE_ID = concat("KEM".getBytes(StandardCharsets.UTF_8), KEM_ID); - private static final byte[] SUITE_ID = concat(concat("HPKE".getBytes(StandardCharsets.UTF_8), KEM_ID), concat(KDF_ID, AEAD_ID)); - /** Nh: KDF output size (SHA-256 = 32). RFC 9180 Section 4. */ - private static final int NH = 32; - /** Nk: AEAD key size (AES-256 = 32). RFC 9180 Section 7.3. */ - private static final int NK = 32; - /** Nn: AEAD nonce size (AES-GCM = 12). RFC 9180 Section 7.3. */ - private static final int NN = 12; - - private static byte[] concat(byte[] a, byte[] b) { - byte[] r = new byte[a.length + b.length]; - System.arraycopy(a, 0, r, 0, a.length); - System.arraycopy(b, 0, r, a.length, b.length); - return r; - } - - private static byte[] i2osp(int n, int w) { - byte[] r = new byte[w]; - for (int i = w - 1; i >= 0; i--) { - r[i] = (byte) (n & 0xff); - n >>= 8; - } - return r; - } - - /** RFC 9180 LabeledExtract(salt, label, ikm); use kemSuiteId for KEM layer, null for HPKE layer (uses SUITE_ID). */ - private static byte[] labeledExtract(byte[] salt, String label, byte[] ikm, Digest digest, byte[] suiteId) { - byte[] sid = suiteId != null ? suiteId : SUITE_ID; - byte[] labeledIkm = concat(concat(concat(HPKE_V1.getBytes(StandardCharsets.UTF_8), sid), label.getBytes(StandardCharsets.UTF_8)), ikm != null ? 
ikm : new byte[0]); - return hkdfExtract(salt, labeledIkm, digest); - } - - /** RFC 9180 LabeledExpand(prk, label, info, L); use kemSuiteId for KEM layer, null for HPKE layer. */ - private static byte[] labeledExpand(byte[] prk, String label, byte[] info, int L, Digest digest, byte[] suiteId) { - byte[] sid = suiteId != null ? suiteId : SUITE_ID; - byte[] labeledInfo = concat(concat(concat(concat(i2osp(L, 2), HPKE_V1.getBytes(StandardCharsets.UTF_8)), sid), label.getBytes(StandardCharsets.UTF_8)), info != null ? info : new byte[0]); - return hkdfExpand(prk, labeledInfo, L, digest); - } - - /** - * RFC 5869 / RFC 9180 HKDF-Extract: returns PRK = HMAC-Hash(salt, IKM). - * Must not use HKDFBytesGenerator with full init (that does Extract+Expand); Bouncy Castle - * would then return Expand(PRK, "", L) instead of PRK. We implement Extract only via HMAC. - */ - private static byte[] hkdfExtract(byte[] salt, byte[] ikm, Digest digest) { - byte[] saltBytes = (salt != null && salt.length > 0) ? salt : new byte[NH]; - HMac hmac = new HMac(digest); - hmac.init(new KeyParameter(saltBytes)); - hmac.update(ikm, 0, ikm.length); - byte[] prk = new byte[NH]; - hmac.doFinal(prk, 0); - return prk; - } - - /** - * RFC 5869 / RFC 9180 HKDF-Expand: OKM = HKDF-Expand(PRK, info, L). - * Must use skipExtractParameters(prk, info) so that Bouncy Castle uses prk as PRK and - * only performs Expand. Using HKDFParameters(prk, null, info) would make BC do - * Extract(null, prk) then Expand, which is wrong. - */ - private static byte[] hkdfExpand(byte[] prk, byte[] info, int L, Digest digest) { - HKDFBytesGenerator gen = new HKDFBytesGenerator(digest); - gen.init(HKDFParameters.skipExtractParameters(prk, info != null ? info : new byte[0])); - byte[] out = new byte[L]; - gen.generateBytes(out, 0, out.length); - return out; - } - - /** - * Seal plaintext with recipient's X25519 public key (raw 32 bytes). - * Returns envelope = enc (32 bytes) || ciphertext (AEAD output). 
- */ - public static byte[] seal(byte[] recipientPublicKey, byte[] plaintext) throws InvalidCipherTextException { - if (recipientPublicKey == null || recipientPublicKey.length != 32 || plaintext == null) { - throw new IllegalArgumentException("recipientPublicKey must be 32 bytes, plaintext non-null"); - } - SecureRandom random = new SecureRandom(); - Digest digest = new SHA256Digest(); - - // 1. Generate ephemeral X25519 key pair (BC crypto) - X25519KeyPairGenerator kpg = new X25519KeyPairGenerator(); - kpg.init(new X25519KeyGenerationParameters(random)); - AsymmetricCipherKeyPair ephemeralKp = kpg.generateKeyPair(); - X25519PublicKeyParameters ephemeralPub = (X25519PublicKeyParameters) ephemeralKp.getPublic(); - byte[] enc = ephemeralPub.getEncoded(); - - // 2. DH shared secret (ephemeral priv, recipient pub) - X25519PublicKeyParameters recipientPub = new X25519PublicKeyParameters(recipientPublicKey, 0); - X25519Agreement agreement = new X25519Agreement(); - agreement.init(ephemeralKp.getPrivate()); - byte[] sharedSecret = new byte[32]; - agreement.calculateAgreement(recipientPub, sharedSecret, 0); - - // 3. KEM shared_secret (DHKEM ExtractAndExpand) with KEM suite_id - byte[] kemContext = concat(enc, recipientPublicKey); - byte[] eaePrk = labeledExtract(new byte[0], "eae_prk", sharedSecret, digest, KEM_SUITE_ID); - byte[] kemSharedSecret = labeledExpand(eaePrk, "shared_secret", kemContext, NH, digest, KEM_SUITE_ID); - - // 4. 
Key schedule (base mode, empty psk; info must match key-agent NewReceiver(sk, info)) - byte[] pskIdHash = labeledExtract(new byte[0], "psk_id_hash", new byte[0], digest, null); - byte[] infoHash = labeledExtract(new byte[0], "info_hash", HPKE_INFO, digest, null); - byte[] keyScheduleContext = concat(new byte[]{0x00}, concat(pskIdHash, infoHash)); - byte[] secret = labeledExtract(kemSharedSecret, "secret", new byte[0], digest, null); - byte[] key = labeledExpand(secret, "key", keyScheduleContext, NK, digest, null); - byte[] baseNonce = labeledExpand(secret, "base_nonce", keyScheduleContext, NN, digest, null); - - // 5. AEAD Seal (AES-256-GCM, empty aad) - GCMBlockCipher cipher = new GCMBlockCipher(new AESEngine()); - cipher.init(true, new AEADParameters(new KeyParameter(key), 128, baseNonce)); - byte[] out = new byte[cipher.getOutputSize(plaintext.length)]; - int len = cipher.processBytes(plaintext, 0, plaintext.length, out, 0); - len += cipher.doFinal(out, len); - byte[] ct = Arrays.copyOf(out, len); - - return concat(enc, ct); - } - - private HostSecretEnvelopeCrypto() { - } -} \ No newline at end of file diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index eb121f58c32..3f982544168 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -425,7 +425,7 @@ public void setErrorCode(String errorCode) { public static class SecretHostDefineCmd extends AgentCommand { private String envelopeDekBase64; - /** Base64 wrapped DEK; agent expects this field name (encryptedDek). */ + /** Base64 envelope of DEK; agent expects this field name (encryptedDek). 
*/ private String encryptedDek; private String vmUuid; private String purpose; diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index bc4e9954b11..7ccd0fe06f8 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -55,6 +55,7 @@ import org.zstack.header.exception.CloudRuntimeException; import org.zstack.header.host.*; import org.zstack.header.host.MigrateVmOnHypervisorMsg.StorageMigrationPolicy; +import org.zstack.header.secret.HostSecretEnvelopeCryptoExtensionPoint; import org.zstack.header.secret.SecretHostDefineMsg; import org.zstack.header.secret.SecretHostDefineReply; import org.zstack.header.message.APIMessage; @@ -5290,30 +5291,6 @@ public void fail(ErrorCode errorCode) { } }); - flow(new NoRollbackFlow() { - String __name__ = "sync-envelope-public-key"; - - @Override - public boolean skip(Map data) { - return data.get(KVMConstant.KVM_HOST_SKIP_PING_NO_FAILURE_EXTENSIONS) != null; - } - - @Override - public void run(FlowTrigger trigger, Map data) { - syncEnvelopeKeyAfterPing(new Completion(trigger) { - @Override - public void success() { - trigger.next(); - } - - @Override - public void fail(ErrorCode errCode) { - trigger.next(); - } - }); - } - }); - done(new FlowDoneHandler(completion) { @Override public void handle(Map data) { @@ -5333,163 +5310,6 @@ public void handle(ErrorCode errCode, Map data) { private static final long ENVELOPE_KEY_HTTP_TIMEOUT_SEC = 5L; - private void doRotateAndGetThenSave(String hostUuid, Completion completion) { - String rotateUrl = buildUrl(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH); - restf.asyncJsonPost(rotateUrl, new KVMAgentCommands.RotatePublicKeyCmd(), - new JsonAsyncRESTCallback(completion) { - @Override - public void fail(ErrorCode err) { - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - } - - @Override - public void 
success(KVMAgentCommands.RotatePublicKeyResponse rotateRsp) { - if (rotateRsp == null || !rotateRsp.isSuccess()) { - logger.warn("rotate key on agent failed for host " + hostUuid + ": " + (rotateRsp != null ? rotateRsp.getError() : "null")); - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - return; - } - String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); - restf.asyncJsonPost(getUrl, new KVMAgentCommands.GetPublicKeyCmd(), - new JsonAsyncRESTCallback(completion) { - @Override - public void fail(ErrorCode err) { - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - } - - @Override - public void success(KVMAgentCommands.GetPublicKeyResponse getRsp) { - if (getRsp != null && getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { - saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); - } else { - setHostKeyIdentityVerified(hostUuid, false); - } - completion.success(); - } - - @Override - public Class getReturnClass() { - return KVMAgentCommands.GetPublicKeyResponse.class; - } - }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); - } - - @Override - public Class getReturnClass() { - return KVMAgentCommands.RotatePublicKeyResponse.class; - } - }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); - } - - private void syncEnvelopeKeyAfterPing(Completion completion) { - KVMHostVO kvo = dbf.reload(getSelf()); - final String hostUuid = kvo.getUuid(); - try { - HostKeyIdentityVO identity = getHostKeyIdentity(hostUuid); - if (identity != null && StringUtils.isNotBlank(identity.getPublicKey())) { - String verifyUrl = buildUrl(KVMConstant.KVM_VERIFY_ENVELOPE_KEY_PATH); - restf.asyncJsonPost(verifyUrl, new KVMAgentCommands.VerifyPublicKeyCmd(), - new JsonAsyncRESTCallback(completion) { - @Override - public void fail(ErrorCode err) { - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - } - - @Override - public void success(KVMAgentCommands.VerifyPublicKeyResponse vrsp) 
{ - if (vrsp != null && vrsp.isSuccess()) { - String storedFp = identity.getFingerprint(); - if (StringUtils.isNotBlank(storedFp)) { - String computed = fingerprintFromPublicKey(identity.getPublicKey()); - if (!storedFp.equals(computed)) { - logger.warn("host " + hostUuid + " verify ok but fingerprint mismatch, rotating and re-getting key"); - doRotateAndGetThenSave(hostUuid, completion); - return; - } - } - setHostKeyIdentityVerified(hostUuid, true); - completion.success(); - return; - } - if (vrsp != null && !vrsp.isSuccess() && isRotateNeededGetError(vrsp.getErrorCode())) { - doRotateAndGetThenSave(hostUuid, completion); - return; - } - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - } - - @Override - public Class getReturnClass() { - return KVMAgentCommands.VerifyPublicKeyResponse.class; - } - }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); - return; - } - String createUrl = buildUrl(KVMConstant.KVM_CREATE_ENVELOPE_KEY_PATH); - restf.asyncJsonPost(createUrl, new KVMAgentCommands.CreatePublicKeyCmd(), - new JsonAsyncRESTCallback(completion) { - @Override - public void fail(ErrorCode err) { - logger.warn("create key on agent failed for host " + hostUuid + ": " + (err != null ? err.getDetails() : "")); - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - } - - @Override - public void success(KVMAgentCommands.CreatePublicKeyResponse createRsp) { - if (createRsp == null || !createRsp.isSuccess()) { - logger.warn("create key on agent failed for host " + hostUuid + ": " + (createRsp != null ? 
createRsp.getError() : "null")); - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - return; - } - String getUrl = buildUrl(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH); - restf.asyncJsonPost(getUrl, new KVMAgentCommands.GetPublicKeyCmd(), - new JsonAsyncRESTCallback(completion) { - @Override - public void fail(ErrorCode err) { - logger.warn("get public key after create failed for host " + hostUuid + ": " + (err != null ? err.getDetails() : "")); - setHostKeyIdentityVerified(hostUuid, false); - completion.success(); - } - - @Override - public void success(KVMAgentCommands.GetPublicKeyResponse getRsp) { - if (getRsp != null && getRsp.isSuccess() && StringUtils.isNotBlank(getRsp.getPublicKey())) { - saveOrUpdateHostKeyIdentity(hostUuid, getRsp.getPublicKey().trim(), true); - } else { - setHostKeyIdentityVerified(hostUuid, false); - } - completion.success(); - } - - @Override - public Class getReturnClass() { - return KVMAgentCommands.GetPublicKeyResponse.class; - } - }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); - } - - @Override - public Class getReturnClass() { - return KVMAgentCommands.CreatePublicKeyResponse.class; - } - }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); - } catch (Exception e) { - logger.warn("sync secret key after connect failed for host " + hostUuid + ": " + e.getMessage()); - try { - setHostKeyIdentityVerified(hostUuid, false); - } catch (Exception ignored) { - } - completion.success(); - } - } - private void setHostKeyIdentityVerified(String hostUuid, boolean verified) { HostKeyIdentityVO vo = getHostKeyIdentity(hostUuid); if (vo != null) { @@ -5626,10 +5446,16 @@ private void handle(SecretHostDefineMsg msg) { bus.reply(msg, reply); return; } + java.util.List sealers = pluginRegistry.getExtensionList(HostSecretEnvelopeCryptoExtensionPoint.class); + if (sealers == null || sealers.isEmpty()) { + reply.setError(operr("host secret envelope sealer not available (premium crypto module required)")); + bus.reply(msg, reply); 
+ return; + } byte[] envelope; try { - envelope = HostSecretEnvelopeCrypto.seal(pubKeyBytes, dekRaw); - } catch (org.bouncycastle.crypto.InvalidCipherTextException e) { + envelope = sealers.get(0).seal(pubKeyBytes, dekRaw); + } catch (Exception e) { reply.setError(operr("HPKE seal failed: %s", e.getMessage())); bus.reply(msg, reply); return; @@ -5656,9 +5482,13 @@ public void success(KVMAgentCommands.SecretHostDefineResponse rsp) { reply.setSecretUuid(rsp.getSecretUuid()); } } else { - reply.setError(operr(rsp != null ? rsp.getError() : "ensure secret failed")); if (rsp != null && rsp.getErrorCode() != null) { - reply.setErrorCode(rsp.getErrorCode()); + ErrorCode err = new ErrorCode(); + err.setCode(rsp.getErrorCode()); + err.setDetails(rsp.getError() != null ? rsp.getError() : "ensure secret failed"); + reply.setError(err); + } else { + reply.setError(operr(rsp != null ? rsp.getError() : "ensure secret failed")); } } bus.reply(msg, reply); diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy index afcdd213a77..685c94ade99 100644 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy @@ -64,7 +64,26 @@ class HostSecretCase extends SubCase { @Override void setup() { - useSpring(KvmTest.springSpec) + // Use KvmTest spring spec plus mock HostSecretEnvelopeCryptoExtensionPoint (premium/crypto not on test classpath) + useSpring(makeSpring { + sftpBackupStorage() + localStorage() + nfsPrimaryStorage() + smp() + virtualRouter() + flatNetwork() + securityGroup() + kvm() + ceph() + vyos() + flatNetwork() + eip() + lb() + portForwarding() + include("LongJobManager.xml") + include("HostAllocateExtension.xml") + include("HostSecretEnvelopeCryptoExtensionPointMock.xml") + }) } @Override diff --git 
a/test/src/test/groovy/org/zstack/test/integration/kvm/host/MockHostSecretEnvelopeCryptoExtensionPoint.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/MockHostSecretEnvelopeCryptoExtensionPoint.groovy new file mode 100644 index 00000000000..c72ee05d8e0 --- /dev/null +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/host/MockHostSecretEnvelopeCryptoExtensionPoint.groovy @@ -0,0 +1,22 @@ +package org.zstack.test.integration.kvm.host + +import org.zstack.header.secret.HostSecretEnvelopeCryptoExtensionPoint + +/** + * Mock implementation for integration test when premium/crypto is not on classpath. + * Returns a fake envelope (32-byte enc + plaintext + 12 tag) so KVM proceeds to call the agent simulator. + */ +class MockHostSecretEnvelopeCryptoExtensionPoint implements HostSecretEnvelopeCryptoExtensionPoint { + @Override + byte[] seal(byte[] recipientPublicKey, byte[] plaintext) throws Exception { + if (recipientPublicKey == null || recipientPublicKey.length != 32 || plaintext == null) { + throw new IllegalArgumentException("recipientPublicKey must be 32 bytes, plaintext non-null") + } + int encLen = 32 + int tagLen = 12 + byte[] envelope = new byte[encLen + plaintext.length + tagLen] + System.arraycopy(recipientPublicKey, 0, envelope, 0, encLen) + System.arraycopy(plaintext, 0, envelope, encLen, plaintext.length) + return envelope + } +} diff --git a/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoExtensionPointMock.xml b/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoExtensionPointMock.xml new file mode 100644 index 00000000000..b4e41cf2794 --- /dev/null +++ b/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoExtensionPointMock.xml @@ -0,0 +1,15 @@ + + + + + + + + + From 8438c7a2189537d0829fc0bd30661e399e81daf6 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Tue, 10 Mar 2026 15:25:31 +0800 Subject: [PATCH 60/76] Remove redundant extension config --- .../hostSecretEnvelopeCrypto.xml | 23 
------------------- conf/zstack.xml | 1 - 2 files changed, 24 deletions(-) delete mode 100644 conf/springConfigXml/hostSecretEnvelopeCrypto.xml diff --git a/conf/springConfigXml/hostSecretEnvelopeCrypto.xml b/conf/springConfigXml/hostSecretEnvelopeCrypto.xml deleted file mode 100644 index 69d61dd397e..00000000000 --- a/conf/springConfigXml/hostSecretEnvelopeCrypto.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - diff --git a/conf/zstack.xml b/conf/zstack.xml index 61d6ae737a8..a4bd0a088b8 100755 --- a/conf/zstack.xml +++ b/conf/zstack.xml @@ -58,7 +58,6 @@ - From 8370b4d846a414a58e2e8090e2cd1439a8405cfd Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Tue, 10 Mar 2026 17:36:35 +0800 Subject: [PATCH 61/76] Add envelope test --- ...ostSecretEnvelopeCryptoExtensionPoint.java | 2 +- .../main/java/org/zstack/kvm/KVMConstant.java | 3 + .../src/main/java/org/zstack/kvm/KVMHost.java | 5 +- test/pom.xml | 2 +- .../kvm/host/HostSecretCase.groovy | 63 ++++--------------- ...stSecretEnvelopeCryptoTestExtension.groovy | 25 ++++++++ ...tSecretEnvelopeCryptoExtensionPoint.groovy | 22 ------- ...HostSecretEnvelopeCryptoTestExtension.xml} | 4 +- 8 files changed, 46 insertions(+), 80 deletions(-) rename {header/src/main/java/org/zstack/header/secret => plugin/kvm/src/main/java/org/zstack/kvm}/HostSecretEnvelopeCryptoExtensionPoint.java (94%) create mode 100644 test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretEnvelopeCryptoTestExtension.groovy delete mode 100644 test/src/test/groovy/org/zstack/test/integration/kvm/host/MockHostSecretEnvelopeCryptoExtensionPoint.groovy rename test/src/test/resources/springConfigXml/{HostSecretEnvelopeCryptoExtensionPointMock.xml => HostSecretEnvelopeCryptoTestExtension.xml} (66%) diff --git a/header/src/main/java/org/zstack/header/secret/HostSecretEnvelopeCryptoExtensionPoint.java b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCryptoExtensionPoint.java similarity index 94% rename from 
header/src/main/java/org/zstack/header/secret/HostSecretEnvelopeCryptoExtensionPoint.java rename to plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCryptoExtensionPoint.java index a166e3d1258..252bffbf055 100644 --- a/header/src/main/java/org/zstack/header/secret/HostSecretEnvelopeCryptoExtensionPoint.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/HostSecretEnvelopeCryptoExtensionPoint.java @@ -1,4 +1,4 @@ -package org.zstack.header.secret; +package org.zstack.kvm; /** * Extension point for sealing plaintext (e.g. DEK) with recipient's X25519 public key for host secret (e.g. vTPM). diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java index 7355ed99f3b..95280978e0e 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java @@ -128,6 +128,9 @@ public interface KVMConstant { String KVM_VERIFY_ENVELOPE_KEY_PATH = "/host/key/envelope/checkEnvelopeKey"; String KVM_ENSURE_SECRET_PATH = "/host/key/envelope/ensureSecret"; + /** HTTP timeout in seconds for envelope key sync (verify/create/rotate/get) to agent. 
*/ + long ENVELOPE_KEY_HTTP_TIMEOUT_SEC = 5L; + String KVM_HOST_FILE_DOWNLOAD_PATH = "/host/file/download"; String KVM_HOST_FILE_UPLOAD_PATH = "/host/file/upload"; String KVM_HOST_FILE_DOWNLOAD_PROGRESS_PATH = "/host/file/progress"; diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 7ccd0fe06f8..979a3715a48 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -55,7 +55,6 @@ import org.zstack.header.exception.CloudRuntimeException; import org.zstack.header.host.*; import org.zstack.header.host.MigrateVmOnHypervisorMsg.StorageMigrationPolicy; -import org.zstack.header.secret.HostSecretEnvelopeCryptoExtensionPoint; import org.zstack.header.secret.SecretHostDefineMsg; import org.zstack.header.secret.SecretHostDefineReply; import org.zstack.header.message.APIMessage; @@ -5308,8 +5307,6 @@ public void handle(ErrorCode errCode, Map data) { }).start(); } - private static final long ENVELOPE_KEY_HTTP_TIMEOUT_SEC = 5L; - private void setHostKeyIdentityVerified(String hostUuid, boolean verified) { HostKeyIdentityVO vo = getHostKeyIdentity(hostUuid); if (vo != null) { @@ -5498,7 +5495,7 @@ public void success(KVMAgentCommands.SecretHostDefineResponse rsp) { public Class getReturnClass() { return KVMAgentCommands.SecretHostDefineResponse.class; } - }, TimeUnit.SECONDS, ENVELOPE_KEY_HTTP_TIMEOUT_SEC); + }, TimeUnit.SECONDS, KVMConstant.ENVELOPE_KEY_HTTP_TIMEOUT_SEC); } @Override diff --git a/test/pom.xml b/test/pom.xml index e054377d284..3f81ded3240 100644 --- a/test/pom.xml +++ b/test/pom.xml @@ -9,7 +9,7 @@ zstack org.zstack - 4.10.0 + 4.10.0 .. 
test diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy index 685c94ade99..144c3a76613 100644 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy @@ -29,9 +29,8 @@ import java.security.MessageDigest import java.util.concurrent.atomic.AtomicInteger /** - * Integration test for host secret: create/get/rotate/verify public key on connect, - * and SecretHostDefine (ensure secret on agent). - * Uses simulated agent for all secret paths. + * Integration test for SecretHostDefine (ensure secret on agent) on KVM host. + * Uses simulated agent; envelope key sync-on-ping and HPKE seal are covered in premium tests. */ class HostSecretCase extends SubCase { EnvSpec env @@ -39,8 +38,7 @@ class HostSecretCase extends SubCase { CloudBus bus HostInventory addedHost - /** Counters for simulator call assertions (async secret sync / ensureSecret). */ - AtomicInteger createEnvelopeKeyCallCount + /** Counters for simulator call assertions (ensureSecret). */ AtomicInteger ensureSecretCallCount /** 32-byte X25519 public key (base64) for simulator; must be valid for HPKE seal. */ @@ -64,7 +62,8 @@ class HostSecretCase extends SubCase { @Override void setup() { - // Use KvmTest spring spec plus mock HostSecretEnvelopeCryptoExtensionPoint (premium/crypto not on test classpath) + // Run without premium/crypto; use test extension HostSecretEnvelopeCryptoTestExtension so that + // SecretHostDefine path can run and call the agent simulator. Sync-on-ping is covered in premium tests. 
useSpring(makeSpring { sftpBackupStorage() localStorage() @@ -82,7 +81,7 @@ class HostSecretCase extends SubCase { portForwarding() include("LongJobManager.xml") include("HostAllocateExtension.xml") - include("HostSecretEnvelopeCryptoExtensionPointMock.xml") + include("HostSecretEnvelopeCryptoTestExtension.xml") }) } @@ -95,7 +94,7 @@ class HostSecretCase extends SubCase { void test() { env.create { prepare() - testAddHostWithSecretSync() + prepareHostForSecretTests() testSecretHostDefineSuccess() testSecretHostDefineFailWhenNoDek() } @@ -112,7 +111,6 @@ class HostSecretCase extends SubCase { } void registerSecretSimulators() { - createEnvelopeKeyCallCount = new AtomicInteger(0) ensureSecretCallCount = new AtomicInteger(0) env.simulator(KVMConstant.KVM_CONNECT_PATH) { @@ -136,35 +134,6 @@ class HostSecretCase extends SubCase { return rsp } - // Ping simulator so we can trigger pingHook (which runs sync-envelope-public-key -> KVM_CREATE_ENVELOPE_KEY_PATH). - // needReconnectHost() is true when rsp.version != dbf.getDbVersion(), which sets KVM_HOST_SKIP_PING_NO_FAILURE_EXTENSIONS - // and skips sync-envelope-public-key; so we must return the actual DB version. 
- def dbVersion = bean(DatabaseFacade.class).getDbVersion() - env.simulator(KVMConstant.KVM_PING_PATH) { HttpEntity e -> - def cmd = org.zstack.utils.gson.JSONObjectUtil.toObject(e.body, KVMAgentCommands.PingCmd.class) - def rsp = new KVMAgentCommands.PingResponse() - rsp.success = true - rsp.hostUuid = cmd.hostUuid - rsp.version = dbVersion - rsp.sendCommandUrl = "http://127.0.0.2:7272" - return rsp - } - - env.simulator(KVMConstant.KVM_CREATE_ENVELOPE_KEY_PATH) { - createEnvelopeKeyCallCount?.incrementAndGet() - return new KVMAgentCommands.CreatePublicKeyResponse() - } - env.simulator(KVMConstant.KVM_GET_ENVELOPE_KEY_PATH) { - def rsp = new KVMAgentCommands.GetPublicKeyResponse() - rsp.publicKey = MOCK_PUBLIC_KEY_BASE64 - return rsp - } - env.simulator(KVMConstant.KVM_VERIFY_ENVELOPE_KEY_PATH) { - return new KVMAgentCommands.VerifyPublicKeyResponse() - } - env.simulator(KVMConstant.KVM_ROTATE_ENVELOPE_KEY_PATH) { - return new KVMAgentCommands.RotatePublicKeyResponse() - } env.simulator(KVMConstant.KVM_ENSURE_SECRET_PATH) { ensureSecretCallCount?.incrementAndGet() def rsp = new KVMAgentCommands.SecretHostDefineResponse() @@ -173,7 +142,12 @@ class HostSecretCase extends SubCase { } } - void testAddHostWithSecretSync() { + /** + * Prepare a connected KVM host and corresponding HostKeyIdentityVO so that + * SecretHostDefineMsg can succeed. Envelope key sync-on-ping itself is + * covered by premium KVMEnvelopeKeySyncExtensionCase. + */ + void prepareHostForSecretTests() { registerSecretSimulators() AddKVMHostMsg amsg = new AddKVMHostMsg() @@ -192,17 +166,6 @@ class HostSecretCase extends SubCase { assert reply.inventory.status == HostStatus.Connected.toString() addedHost = reply.inventory - // Envelope key sync runs inside pingHook, not during connect. Trigger a ping so that - // sync-envelope-public-key runs and KVM_CREATE_ENVELOPE_KEY_PATH is invoked. 
- PingHostMsg pingMsg = new PingHostMsg() - pingMsg.hostUuid = addedHost.uuid - bus.makeTargetServiceIdByResourceUuid(pingMsg, HostConstant.SERVICE_ID, addedHost.uuid) - MessageReply pingReply = bus.call(pingMsg) - assert pingReply.isSuccess() : "PingHost failed: ${pingReply.error}" - - assert createEnvelopeKeyCallCount.get() >= 1 : "envelope key sync (KVM_CREATE_ENVELOPE_KEY_PATH) should be triggered at least once after add host" - - // Create/ping may already persist HostKeyIdentityVO (sync path calls GET then saveOrUpdateHostKeyIdentity). // Ensure HostKeyIdentity exists with expected key so SecretHostDefineMsg finds it and fingerprint check passes. DatabaseFacade dbf = bean(DatabaseFacade.class) SimpleQuery q = dbf.createQuery(HostKeyIdentityVO.class) diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretEnvelopeCryptoTestExtension.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretEnvelopeCryptoTestExtension.groovy new file mode 100644 index 00000000000..f15f7ce8b16 --- /dev/null +++ b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretEnvelopeCryptoTestExtension.groovy @@ -0,0 +1,25 @@ +package org.zstack.test.integration.kvm.host.secret + +import org.zstack.kvm.HostSecretEnvelopeCryptoExtensionPoint + +/** + * Test-side mock of HostSecretEnvelopeCryptoExtensionPoint. + * Does NOT call premium crypto; just returns a fake envelope that looks structurally valid to the agent. 
+ */ +class HostSecretEnvelopeCryptoTestExtension implements HostSecretEnvelopeCryptoExtensionPoint { + @Override + byte[] seal(byte[] recipientPublicKey, byte[] plaintext) throws Exception { + if (recipientPublicKey == null || recipientPublicKey.length != 32 || plaintext == null) { + throw new IllegalArgumentException("recipientPublicKey must be 32 bytes, plaintext non-null") + } + int encLen = 32 + int tagLen = 12 + byte[] envelope = new byte[encLen + plaintext.length + tagLen] + // Put the recipient public key into the "enc" slot so envelope[0..31] looks like a plausible X25519 public key. + System.arraycopy(recipientPublicKey, 0, envelope, 0, encLen) + // Copy plaintext in the middle; the simulator never decrypts it. + System.arraycopy(plaintext, 0, envelope, encLen, plaintext.length) + // Leave the last 12 bytes as zeros to mimic an AEAD tag. + return envelope + } +} diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/MockHostSecretEnvelopeCryptoExtensionPoint.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/MockHostSecretEnvelopeCryptoExtensionPoint.groovy deleted file mode 100644 index c72ee05d8e0..00000000000 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/host/MockHostSecretEnvelopeCryptoExtensionPoint.groovy +++ /dev/null @@ -1,22 +0,0 @@ -package org.zstack.test.integration.kvm.host - -import org.zstack.header.secret.HostSecretEnvelopeCryptoExtensionPoint - -/** - * Mock implementation for integration test when premium/crypto is not on classpath. - * Returns a fake envelope (32-byte enc + plaintext + 12 tag) so KVM proceeds to call the agent simulator. 
- */ -class MockHostSecretEnvelopeCryptoExtensionPoint implements HostSecretEnvelopeCryptoExtensionPoint { - @Override - byte[] seal(byte[] recipientPublicKey, byte[] plaintext) throws Exception { - if (recipientPublicKey == null || recipientPublicKey.length != 32 || plaintext == null) { - throw new IllegalArgumentException("recipientPublicKey must be 32 bytes, plaintext non-null") - } - int encLen = 32 - int tagLen = 12 - byte[] envelope = new byte[encLen + plaintext.length + tagLen] - System.arraycopy(recipientPublicKey, 0, envelope, 0, encLen) - System.arraycopy(plaintext, 0, envelope, encLen, plaintext.length) - return envelope - } -} diff --git a/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoExtensionPointMock.xml b/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoTestExtension.xml similarity index 66% rename from test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoExtensionPointMock.xml rename to test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoTestExtension.xml index b4e41cf2794..9b4cb4b288f 100644 --- a/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoExtensionPointMock.xml +++ b/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoTestExtension.xml @@ -7,9 +7,9 @@ http://zstack.org/schema/zstack http://zstack.org/schema/zstack/plugin.xsd"> - + - + From 5da8d06e09f512d45bb195e831095ec673f15edf Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Tue, 10 Mar 2026 18:31:52 +0800 Subject: [PATCH 62/76] Introduce host key helper --- .../org/zstack/kvm/HostKeyIdentityHelper.java | 123 ++++++++++++++++++ .../main/java/org/zstack/kvm/KVMConstant.java | 3 + .../src/main/java/org/zstack/kvm/KVMHost.java | 76 +---------- 3 files changed, 129 insertions(+), 73 deletions(-) create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java 
b/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java new file mode 100644 index 00000000000..ffe65edca47 --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java @@ -0,0 +1,123 @@ +package org.zstack.kvm; + +import org.apache.commons.lang.StringUtils; +import org.zstack.core.db.DatabaseFacade; +import org.zstack.core.db.SimpleQuery; +import org.zstack.core.db.SimpleQuery.Op; +import org.zstack.header.host.HostKeyIdentityVO; +import org.zstack.header.host.HostKeyIdentityVO_; +import org.zstack.header.secret.SecretHostDefineReply; +import org.zstack.utils.logging.CLogger; + +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Base64; + +/** + * Shared helper for host envelope key identity: fingerprint and save/update. + * Used by KVMHost (zstack) and KVMEnvelopeKeySyncExtension (premium). + */ +public final class HostKeyIdentityHelper { + private static final CLogger logger = org.zstack.utils.logging.CLoggerImpl.getLogger(HostKeyIdentityHelper.class); + + private HostKeyIdentityHelper() { + } + + /** + * Compute fingerprint from public key (base64): SHA-256 of decoded key bytes, hex-encoded. + * Returns empty string if key is invalid or hashing fails. 
+ */ + public static String fingerprintFromPublicKey(String publicKeyBase64) { + if (publicKeyBase64 == null || publicKeyBase64.isEmpty()) { + return ""; + } + try { + byte[] keyBytes = Base64.getDecoder().decode(publicKeyBase64.trim()); + if (keyBytes == null || keyBytes.length == 0) { + return ""; + } + MessageDigest md = MessageDigest.getInstance("SHA-256"); + byte[] hash = md.digest(keyBytes); + StringBuilder sb = new StringBuilder(hash.length * 2); + for (byte b : hash) { + sb.append(String.format("%02x", b & 0xff)); + } + return sb.toString(); + } catch (IllegalArgumentException | NoSuchAlgorithmException e) { + return ""; + } + } + + public static HostKeyIdentityVO getHostKeyIdentity(DatabaseFacade dbf, String hostUuid) { + SimpleQuery q = dbf.createQuery(HostKeyIdentityVO.class); + q.add(HostKeyIdentityVO_.hostUuid, Op.EQ, hostUuid); + return q.find(); + } + + /** + * Save or update host key identity. Validates publicKey (base64, 32 bytes, non-empty fingerprint). + * On invalid key, marks existing record as verified=false and returns without persisting bad data. 
+ */ + public static void saveOrUpdateHostKeyIdentity(DatabaseFacade dbf, String hostUuid, String publicKey, boolean verified) { + if (StringUtils.isBlank(publicKey)) { + return; + } + + String keyToSave = publicKey.trim(); + byte[] decodedKey; + try { + decodedKey = Base64.getDecoder().decode(keyToSave); + } catch (IllegalArgumentException e) { + logger.warn("host " + hostUuid + " returned an invalid envelope public key"); + setVerified(dbf, hostUuid, false); + return; + } + if (decodedKey.length != 32) { + logger.warn("host " + hostUuid + " returned an envelope public key with unexpected length: " + decodedKey.length); + setVerified(dbf, hostUuid, false); + return; + } + String fingerprint = fingerprintFromPublicKey(keyToSave); + if (StringUtils.isBlank(fingerprint)) { + logger.warn("host " + hostUuid + " returned an envelope public key with empty fingerprint"); + setVerified(dbf, hostUuid, false); + return; + } + + HostKeyIdentityVO vo = getHostKeyIdentity(dbf, hostUuid); + if (vo == null) { + vo = new HostKeyIdentityVO(); + vo.setHostUuid(hostUuid); + vo.setPublicKey(keyToSave); + vo.setFingerprint(fingerprint); + vo.setVerified(verified); + vo.setCreateDate(new java.sql.Timestamp(System.currentTimeMillis())); + dbf.persist(vo); + return; + } + vo.setPublicKey(keyToSave); + vo.setFingerprint(fingerprint); + vo.setVerified(verified); + dbf.update(vo); + } + + /** + * Set verified flag for existing host key identity record, if present. + */ + public static void setVerified(DatabaseFacade dbf, String hostUuid, boolean verified) { + HostKeyIdentityVO vo = getHostKeyIdentity(dbf, hostUuid); + if (vo != null) { + vo.setVerified(verified); + dbf.update(vo); + } + } + + /** + * Whether the verify response error code indicates that key rotate/re-fetch is needed. 
+ */ + public static boolean isRotateNeededGetError(String errorCode) { + if (errorCode == null) return false; + return SecretHostDefineReply.ERROR_CODE_KEYS_NOT_ON_DISK.equals(errorCode) + || SecretHostDefineReply.ERROR_CODE_KEY_FILES_INTEGRITY_MISMATCH.equals(errorCode); + } +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java index 95280978e0e..c2cd54c184f 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMConstant.java @@ -131,6 +131,9 @@ public interface KVMConstant { /** HTTP timeout in seconds for envelope key sync (verify/create/rotate/get) to agent. */ long ENVELOPE_KEY_HTTP_TIMEOUT_SEC = 5L; + /** Max size in bytes for DEK payload in SecretHostDefine (decoded from dekBase64). */ + int MAX_DEK_BYTES = 1024; + String KVM_HOST_FILE_DOWNLOAD_PATH = "/host/file/download"; String KVM_HOST_FILE_UPLOAD_PATH = "/host/file/upload"; String KVM_HOST_FILE_DOWNLOAD_PROGRESS_PATH = "/host/file/progress"; diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 979a3715a48..01635771110 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5307,76 +5307,6 @@ public void handle(ErrorCode errCode, Map data) { }).start(); } - private void setHostKeyIdentityVerified(String hostUuid, boolean verified) { - HostKeyIdentityVO vo = getHostKeyIdentity(hostUuid); - if (vo != null) { - vo.setVerified(verified); - dbf.update(vo); - } - } - - private static boolean isRotateNeededGetError(String errorCode) { - if (errorCode == null) return false; - return SecretHostDefineReply.ERROR_CODE_KEYS_NOT_ON_DISK.equals(errorCode) - || SecretHostDefineReply.ERROR_CODE_KEY_FILES_INTEGRITY_MISMATCH.equals(errorCode); - } - - private HostKeyIdentityVO getHostKeyIdentity(String hostUuid) { - SimpleQuery q = 
dbf.createQuery(HostKeyIdentityVO.class); - q.add(HostKeyIdentityVO_.hostUuid, Op.EQ, hostUuid); - return q.find(); - } - - /** - * Compute fingerprint from public key (base64): SHA-256 of decoded key bytes, hex-encoded. - * Returns empty string if key is invalid or hashing fails. - */ - private static String fingerprintFromPublicKey(String publicKeyBase64) { - if (publicKeyBase64 == null || publicKeyBase64.isEmpty()) { - return ""; - } - try { - byte[] keyBytes = Base64.getDecoder().decode(publicKeyBase64.trim()); - if (keyBytes == null || keyBytes.length == 0) { - return ""; - } - MessageDigest md = MessageDigest.getInstance("SHA-256"); - byte[] hash = md.digest(keyBytes); - StringBuilder sb = new StringBuilder(hash.length * 2); - for (byte b : hash) { - sb.append(String.format("%02x", b & 0xff)); - } - return sb.toString(); - } catch (IllegalArgumentException | NoSuchAlgorithmException e) { - return ""; - } - } - - private void saveOrUpdateHostKeyIdentity(String hostUuid, String publicKey, boolean verified) { - if (StringUtils.isBlank(publicKey)) { - return; - } - String keyToSave = publicKey.trim(); - String fingerprint = fingerprintFromPublicKey(keyToSave); - HostKeyIdentityVO vo = getHostKeyIdentity(hostUuid); - if (vo == null) { - vo = new HostKeyIdentityVO(); - vo.setHostUuid(hostUuid); - vo.setPublicKey(keyToSave); - vo.setFingerprint(fingerprint); - vo.setVerified(verified); - vo.setCreateDate(new java.sql.Timestamp(System.currentTimeMillis())); - dbf.persist(vo); - return; - } - vo.setPublicKey(keyToSave); - vo.setFingerprint(fingerprint); - vo.setVerified(verified); - dbf.update(vo); - } - - private static final int MAX_DEK_BYTES = 1024; - private void handle(SecretHostDefineMsg msg) { SecretHostDefineReply reply = new SecretHostDefineReply(); if (org.apache.commons.lang.StringUtils.isBlank(msg.getDekBase64())) { @@ -5390,7 +5320,7 @@ private void handle(SecretHostDefineMsg msg) { return; } String hostUuid = getSelf().getUuid(); - HostKeyIdentityVO 
identity = getHostKeyIdentity(hostUuid); + HostKeyIdentityVO identity = HostKeyIdentityHelper.getHostKeyIdentity(dbf, hostUuid); String pubKey = identity != null ? org.apache.commons.lang.StringUtils.trimToNull(identity.getPublicKey()) : null; Boolean verifyOk = identity != null ? identity.getVerified() : null; if (pubKey == null) { @@ -5399,7 +5329,7 @@ private void handle(SecretHostDefineMsg msg) { return; } String storedFingerprint = identity.getFingerprint(); - String computed = fingerprintFromPublicKey(pubKey); + String computed = HostKeyIdentityHelper.fingerprintFromPublicKey(pubKey); if (!storedFingerprint.equals(computed)) { reply.setError(operr("host public key fingerprint mismatch, key may be corrupted or tampered")); bus.reply(msg, reply); @@ -5424,7 +5354,7 @@ private void handle(SecretHostDefineMsg msg) { return; } - if (dekRaw.length > MAX_DEK_BYTES) { + if (dekRaw.length > KVMConstant.MAX_DEK_BYTES) { reply.setError(operr("dekBase64 decoded payload is too large")); bus.reply(msg, reply); return; From b432c1bb59a8484604c96865307a09a9ab43d2d9 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Tue, 10 Mar 2026 20:49:46 +0800 Subject: [PATCH 63/76] Mova secret test to premium --- .../kvm/host/HostSecretCase.groovy | 225 ------------------ ...stSecretEnvelopeCryptoTestExtension.groovy | 25 -- .../HostSecretEnvelopeCryptoTestExtension.xml | 15 -- 3 files changed, 265 deletions(-) delete mode 100644 test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy delete mode 100644 test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretEnvelopeCryptoTestExtension.groovy delete mode 100644 test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoTestExtension.xml diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy deleted file mode 100644 index 144c3a76613..00000000000 --- 
a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretCase.groovy +++ /dev/null @@ -1,225 +0,0 @@ -package org.zstack.test.integration.kvm.host - -import org.zstack.core.Platform -import org.zstack.core.cloudbus.CloudBus -import org.zstack.core.db.DatabaseFacade -import org.zstack.core.db.SimpleQuery -import org.zstack.core.db.SimpleQuery.Op -import org.zstack.header.host.AddHostReply -import org.zstack.header.host.HostConstant -import org.zstack.header.host.HostInventory -import org.zstack.header.host.HostKeyIdentityVO -import org.zstack.header.host.HostKeyIdentityVO_ -import org.zstack.header.host.HostStatus -import org.zstack.header.host.PingHostMsg -import org.zstack.header.host.PingHostReply -import org.zstack.header.message.MessageReply -import org.zstack.kvm.AddKVMHostMsg -import org.zstack.kvm.KVMConstant -import org.zstack.kvm.KVMAgentCommands -import org.zstack.storage.primary.local.LocalStorageKvmBackend -import org.zstack.test.integration.kvm.KvmTest -import org.springframework.http.HttpEntity -import org.zstack.testlib.EnvSpec -import org.zstack.testlib.SubCase -import org.zstack.header.secret.SecretHostDefineMsg -import org.zstack.header.secret.SecretHostDefineReply - -import java.security.MessageDigest -import java.util.concurrent.atomic.AtomicInteger - -/** - * Integration test for SecretHostDefine (ensure secret on agent) on KVM host. - * Uses simulated agent; envelope key sync-on-ping and HPKE seal are covered in premium tests. - */ -class HostSecretCase extends SubCase { - EnvSpec env - def cluster - CloudBus bus - HostInventory addedHost - - /** Counters for simulator call assertions (ensureSecret). */ - AtomicInteger ensureSecretCallCount - - /** 32-byte X25519 public key (base64) for simulator; must be valid for HPKE seal. */ - static final String MOCK_PUBLIC_KEY_BASE64 = "AQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyA=" - - /** Same algorithm as KVMHost.fingerprintFromPublicKey: SHA-256(decoded base64) in hex. 
*/ - static String fingerprintFromPublicKey(String publicKeyBase64) { - if (publicKeyBase64 == null || publicKeyBase64.isEmpty()) return "" - try { - byte[] keyBytes = java.util.Base64.getDecoder().decode(publicKeyBase64.trim()) - if (keyBytes == null || keyBytes.length == 0) return "" - MessageDigest md = MessageDigest.getInstance("SHA-256") - byte[] hash = md.digest(keyBytes) - StringBuilder sb = new StringBuilder(hash.length * 2) - for (byte b : hash) sb.append(String.format("%02x", b & 0xff)) - return sb.toString() - } catch (Exception e) { - return "" - } - } - - @Override - void setup() { - // Run without premium/crypto; use test extension HostSecretEnvelopeCryptoTestExtension so that - // SecretHostDefine path can run and call the agent simulator. Sync-on-ping is covered in premium tests. - useSpring(makeSpring { - sftpBackupStorage() - localStorage() - nfsPrimaryStorage() - smp() - virtualRouter() - flatNetwork() - securityGroup() - kvm() - ceph() - vyos() - flatNetwork() - eip() - lb() - portForwarding() - include("LongJobManager.xml") - include("HostAllocateExtension.xml") - include("HostSecretEnvelopeCryptoTestExtension.xml") - }) - } - - @Override - void environment() { - env = HostEnv.noHostBasicEnv() - } - - @Override - void test() { - env.create { - prepare() - prepareHostForSecretTests() - testSecretHostDefineSuccess() - testSecretHostDefineFailWhenNoDek() - } - } - - @Override - void clean() { - env.delete() - } - - void prepare() { - cluster = env.inventoryByName("cluster") - bus = bean(CloudBus.class) - } - - void registerSecretSimulators() { - ensureSecretCallCount = new AtomicInteger(0) - - env.simulator(KVMConstant.KVM_CONNECT_PATH) { - def rsp = new KVMAgentCommands.ConnectResponse() - rsp.success = true - rsp.libvirtVersion = "1.0.0" - rsp.qemuVersion = "1.3.0" - return rsp - } - // Use afterSimulator like AddHostCase: rely on testlib default HostFactResponse, only set what this test needs. 
- env.afterSimulator(KVMConstant.KVM_HOST_FACT_PATH) { KVMAgentCommands.HostFactResponse rsp -> - rsp.hvmCpuFlag = "vmx" // default is ""; connect needs vmx/svm to pass checkVirtualizationEnabled - return rsp - } - env.simulator(LocalStorageKvmBackend.INIT_PATH) { HttpEntity e -> - def rsp = new LocalStorageKvmBackend.InitRsp() - rsp.success = true - rsp.localStorageUsedCapacity = 0L - rsp.totalCapacity = 0L - rsp.availableCapacity = 0L - return rsp - } - - env.simulator(KVMConstant.KVM_ENSURE_SECRET_PATH) { - ensureSecretCallCount?.incrementAndGet() - def rsp = new KVMAgentCommands.SecretHostDefineResponse() - rsp.secretUuid = Platform.uuid - return rsp - } - } - - /** - * Prepare a connected KVM host and corresponding HostKeyIdentityVO so that - * SecretHostDefineMsg can succeed. Envelope key sync-on-ping itself is - * covered by premium KVMEnvelopeKeySyncExtensionCase. - */ - void prepareHostForSecretTests() { - registerSecretSimulators() - - AddKVMHostMsg amsg = new AddKVMHostMsg() - amsg.accountUuid = loginAsAdmin().accountUuid - amsg.name = "kvm" - amsg.managementIp = "127.0.0.2" - amsg.resourceUuid = Platform.uuid - amsg.clusterUuid = cluster.uuid - amsg.setPassword("password") - amsg.setUsername("root") - - bus.makeLocalServiceId(amsg, HostConstant.SERVICE_ID) - AddHostReply reply = (AddHostReply) bus.call(amsg) - assert reply != null - assert reply.isSuccess() : "AddHost failed: ${reply.error?.toString() ?: 'no error'}" - assert reply.inventory.status == HostStatus.Connected.toString() - addedHost = reply.inventory - - // Ensure HostKeyIdentity exists with expected key so SecretHostDefineMsg finds it and fingerprint check passes. 
- DatabaseFacade dbf = bean(DatabaseFacade.class) - SimpleQuery q = dbf.createQuery(HostKeyIdentityVO.class) - q.add(HostKeyIdentityVO_.hostUuid, Op.EQ, addedHost.uuid) - HostKeyIdentityVO keyVo = q.find() - if (keyVo == null) { - keyVo = new HostKeyIdentityVO() - keyVo.hostUuid = addedHost.uuid - keyVo.publicKey = MOCK_PUBLIC_KEY_BASE64 - keyVo.fingerprint = fingerprintFromPublicKey(MOCK_PUBLIC_KEY_BASE64) - keyVo.verified = true - dbf.persist(keyVo) - } else { - keyVo.publicKey = MOCK_PUBLIC_KEY_BASE64 - keyVo.fingerprint = fingerprintFromPublicKey(MOCK_PUBLIC_KEY_BASE64) - keyVo.verified = true - dbf.update(keyVo) - } - } - - void testSecretHostDefineSuccess() { - assert addedHost != null - - int countBefore = ensureSecretCallCount.get() - - SecretHostDefineMsg msg = new SecretHostDefineMsg() - msg.hostUuid = addedHost.uuid - msg.dekBase64 = "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8=" - msg.vmUuid = Platform.uuid - msg.purpose = "test-vtpm" - msg.providerName = "vtpm" - - bus.makeTargetServiceIdByResourceUuid(msg, HostConstant.SERVICE_ID, addedHost.uuid) - MessageReply reply = bus.call(msg) - assert reply != null - assert reply.isSuccess() - SecretHostDefineReply defineReply = reply.castReply() - assert defineReply.secretUuid != null - - // Ensure KVM_ENSURE_SECRET_PATH was actually called (asyncJsonPost to agent). 
- assert ensureSecretCallCount.get() == countBefore + 1 : "KVM_ENSURE_SECRET_PATH simulator should be called exactly once for SecretHostDefineMsg" - } - - void testSecretHostDefineFailWhenNoDek() { - assert addedHost != null - - SecretHostDefineMsg msg = new SecretHostDefineMsg() - msg.hostUuid = addedHost.uuid - msg.dekBase64 = null - - bus.makeTargetServiceIdByResourceUuid(msg, HostConstant.SERVICE_ID, addedHost.uuid) - MessageReply reply = bus.call(msg) - assert reply != null - assert !reply.isSuccess() - assert reply.error != null - } -} \ No newline at end of file diff --git a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretEnvelopeCryptoTestExtension.groovy b/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretEnvelopeCryptoTestExtension.groovy deleted file mode 100644 index f15f7ce8b16..00000000000 --- a/test/src/test/groovy/org/zstack/test/integration/kvm/host/HostSecretEnvelopeCryptoTestExtension.groovy +++ /dev/null @@ -1,25 +0,0 @@ -package org.zstack.test.integration.kvm.host.secret - -import org.zstack.kvm.HostSecretEnvelopeCryptoExtensionPoint - -/** - * Test-side mock of HostSecretEnvelopeCryptoExtensionPoint. - * Does NOT call premium crypto; just returns a fake envelope that looks structurally valid to the agent. - */ -class HostSecretEnvelopeCryptoTestExtension implements HostSecretEnvelopeCryptoExtensionPoint { - @Override - byte[] seal(byte[] recipientPublicKey, byte[] plaintext) throws Exception { - if (recipientPublicKey == null || recipientPublicKey.length != 32 || plaintext == null) { - throw new IllegalArgumentException("recipientPublicKey must be 32 bytes, plaintext non-null") - } - int encLen = 32 - int tagLen = 12 - byte[] envelope = new byte[encLen + plaintext.length + tagLen] - // Put the recipient public key into the "enc" slot so envelope[0..31] looks like a plausible X25519 public key. 
- System.arraycopy(recipientPublicKey, 0, envelope, 0, encLen) - // Copy plaintext in the middle; the simulator never decrypts it. - System.arraycopy(plaintext, 0, envelope, encLen, plaintext.length) - // Leave the last 12 bytes as zeros to mimic an AEAD tag. - return envelope - } -} diff --git a/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoTestExtension.xml b/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoTestExtension.xml deleted file mode 100644 index 9b4cb4b288f..00000000000 --- a/test/src/test/resources/springConfigXml/HostSecretEnvelopeCryptoTestExtension.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - From 55d935a16ccf30e55d03b9c9ca5d5c73431492ce Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Wed, 11 Mar 2026 10:33:52 +0800 Subject: [PATCH 64/76] Remove unused envelopeDekBase64 --- plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java | 1 - 1 file changed, 1 deletion(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index 3f982544168..19d65295d22 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -424,7 +424,6 @@ public void setErrorCode(String errorCode) { } public static class SecretHostDefineCmd extends AgentCommand { - private String envelopeDekBase64; /** Base64 envelope of DEK; agent expects this field name (encryptedDek). 
*/ private String encryptedDek; private String vmUuid; From e2242e1af00ed52d5741d1671e577555f13715ea Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Wed, 11 Mar 2026 10:38:28 +0800 Subject: [PATCH 65/76] Verify storedFingerprint is null --- plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 01635771110..65a3f682a11 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5328,9 +5328,9 @@ private void handle(SecretHostDefineMsg msg) { bus.reply(msg, reply); return; } - String storedFingerprint = identity.getFingerprint(); + String storedFingerprint = StringUtils.trimToNull(identity.getFingerprint()); String computed = HostKeyIdentityHelper.fingerprintFromPublicKey(pubKey); - if (!storedFingerprint.equals(computed)) { + if (storedFingerprint == null || !StringUtils.equals(storedFingerprint, computed)) { reply.setError(operr("host public key fingerprint mismatch, key may be corrupted or tampered")); bus.reply(msg, reply); return; From 54bd2d9db16f1df3520fc72a657aacc472859167 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Wed, 11 Mar 2026 10:50:46 +0800 Subject: [PATCH 66/76] Remove self errCode --- .../java/org/zstack/kvm/KVMAgentCommands.java | 27 ------------------- .../src/main/java/org/zstack/kvm/KVMHost.java | 6 ++--- 2 files changed, 3 insertions(+), 30 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java index 19d65295d22..e1be2dc98b0 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMAgentCommands.java @@ -383,7 +383,6 @@ public static class GetPublicKeyCmd extends AgentCommand { public static class GetPublicKeyResponse extends 
AgentResponse { private String publicKey; - private String errorCode; public String getPublicKey() { return publicKey; @@ -392,14 +391,6 @@ public String getPublicKey() { public void setPublicKey(String publicKey) { this.publicKey = publicKey; } - - public String getErrorCode() { - return errorCode; - } - - public void setErrorCode(String errorCode) { - this.errorCode = errorCode; - } } public static class RotatePublicKeyCmd extends AgentCommand { @@ -412,15 +403,6 @@ public static class VerifyPublicKeyCmd extends AgentCommand { } public static class VerifyPublicKeyResponse extends AgentResponse { - private String errorCode; - - public String getErrorCode() { - return errorCode; - } - - public void setErrorCode(String errorCode) { - this.errorCode = errorCode; - } } public static class SecretHostDefineCmd extends AgentCommand { @@ -473,17 +455,8 @@ public void setDescription(String description) { } public static class SecretHostDefineResponse extends AgentResponse { - private String errorCode; private String secretUuid; - public String getErrorCode() { - return errorCode; - } - - public void setErrorCode(String errorCode) { - this.errorCode = errorCode; - } - public String getSecretUuid() { return secretUuid; } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java index 65a3f682a11..a23773bbca2 100755 --- a/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/KVMHost.java @@ -5409,10 +5409,10 @@ public void success(KVMAgentCommands.SecretHostDefineResponse rsp) { reply.setSecretUuid(rsp.getSecretUuid()); } } else { - if (rsp != null && rsp.getErrorCode() != null) { + if (rsp != null && rsp.getError() != null) { ErrorCode err = new ErrorCode(); - err.setCode(rsp.getErrorCode()); - err.setDetails(rsp.getError() != null ? 
rsp.getError() : "ensure secret failed"); + err.setCode(rsp.getError()); + err.setDetails(rsp.getError()); reply.setError(err); } else { reply.setError(operr(rsp != null ? rsp.getError() : "ensure secret failed")); From 2eeea982a4abe696367cdcedb8d54b1d2bc88b67 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Wed, 11 Mar 2026 12:01:14 +0800 Subject: [PATCH 67/76] Fix some review suggestion --- .../main/java/org/zstack/header/secret/SecretHostDefineMsg.java | 2 +- .../kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java index e8aeb6d9370..1997267b055 100644 --- a/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java +++ b/header/src/main/java/org/zstack/header/secret/SecretHostDefineMsg.java @@ -66,4 +66,4 @@ public String getDescription() { public void setDescription(String description) { this.description = description; } -} \ No newline at end of file +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java b/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java index ffe65edca47..da59697a290 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java @@ -33,7 +33,7 @@ public static String fingerprintFromPublicKey(String publicKeyBase64) { } try { byte[] keyBytes = Base64.getDecoder().decode(publicKeyBase64.trim()); - if (keyBytes == null || keyBytes.length == 0) { + if (keyBytes.length == 0) { return ""; } MessageDigest md = MessageDigest.getInstance("SHA-256"); From 9ed89190544e20b0dbe8ab658882902ee78522c7 Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Wed, 11 Mar 2026 12:41:32 +0800 Subject: [PATCH 68/76] Fix db Duplicate entry --- .../org/zstack/kvm/HostKeyIdentityHelper.java | 19 
+++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java b/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java index da59697a290..4cc3fa82830 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java @@ -7,10 +7,14 @@ import org.zstack.header.host.HostKeyIdentityVO; import org.zstack.header.host.HostKeyIdentityVO_; import org.zstack.header.secret.SecretHostDefineReply; +import org.zstack.utils.ExceptionDSL; import org.zstack.utils.logging.CLogger; +import javax.persistence.EntityExistsException; +import javax.persistence.PersistenceException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; +import java.sql.SQLIntegrityConstraintViolationException; import java.util.Base64; /** @@ -92,8 +96,19 @@ public static void saveOrUpdateHostKeyIdentity(DatabaseFacade dbf, String hostUu vo.setFingerprint(fingerprint); vo.setVerified(verified); vo.setCreateDate(new java.sql.Timestamp(System.currentTimeMillis())); - dbf.persist(vo); - return; + try { + dbf.persist(vo); + return; + } catch (EntityExistsException | PersistenceException e) { + if (!ExceptionDSL.isCausedBy(e, EntityExistsException.class) + && !ExceptionDSL.isCausedBy(e, SQLIntegrityConstraintViolationException.class, "Duplicate entry")) { + throw e; + } + vo = getHostKeyIdentity(dbf, hostUuid); + if (vo == null) { + throw e; + } + } } vo.setPublicKey(keyToSave); vo.setFingerprint(fingerprint); From d6c42afa21ead4e1a61bbcb4b772a6f1031876fb Mon Sep 17 00:00:00 2001 From: "zhong.zhou" Date: Wed, 11 Mar 2026 13:20:10 +0800 Subject: [PATCH 69/76] Remove catching EntityExistsException --- .../kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java 
b/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java index 4cc3fa82830..d100dbeee17 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/HostKeyIdentityHelper.java @@ -99,7 +99,7 @@ public static void saveOrUpdateHostKeyIdentity(DatabaseFacade dbf, String hostUu try { dbf.persist(vo); return; - } catch (EntityExistsException | PersistenceException e) { + } catch (PersistenceException e) { if (!ExceptionDSL.isCausedBy(e, EntityExistsException.class) && !ExceptionDSL.isCausedBy(e, SQLIntegrityConstraintViolationException.class, "Duplicate entry")) { throw e; From 20eb2085712f07fbc70e503f809d510e0138bcc7 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 12 Mar 2026 16:03:33 +0800 Subject: [PATCH 70/76] [kvm]: introduce VmHostBackupFileVO Resolves: ZSV-11439 Related: ZSV-11310 Change-Id: I63706e6a7a75616366716574646a677a6e646276 --- conf/db/zsv/V5.0.0__schema.sql | 19 +++-- conf/persistence.xml | 1 + .../vm/additions/VmHostBackupFileVO.java | 80 +++++++++++++++++++ .../vm/additions/VmHostBackupFileVO_.java | 15 ++++ .../header/vm/additions/VmHostFileType.java | 2 - 5 files changed, 110 insertions(+), 7 deletions(-) create mode 100644 header/src/main/java/org/zstack/header/vm/additions/VmHostBackupFileVO.java create mode 100644 header/src/main/java/org/zstack/header/vm/additions/VmHostBackupFileVO_.java diff --git a/conf/db/zsv/V5.0.0__schema.sql b/conf/db/zsv/V5.0.0__schema.sql index ac3b849ec1e..64eb64b56b0 100644 --- a/conf/db/zsv/V5.0.0__schema.sql +++ b/conf/db/zsv/V5.0.0__schema.sql @@ -13,26 +13,35 @@ CREATE TABLE IF NOT EXISTS `zstack`.`VmHostFileVO` ( `uuid` char(32) NOT NULL UNIQUE, `vmInstanceUuid` char(32) NOT NULL, `hostUuid` char(32) NOT NULL, - `type` varchar(64) NOT NULL COMMENT 'NvRam, TpmState, NvRamBackup, TpmStateBackup', + `type` varchar(64) NOT NULL COMMENT 'NvRam, TpmState', `path` varchar(1024) NOT NULL COMMENT 'Absolute path of the file on the host', 
`lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', PRIMARY KEY (`uuid`), INDEX `idxVmHostFileVOVmInstanceUuid` (`vmInstanceUuid`), INDEX `idxVmHostFileVOHostUuid` (`hostUuid`), - CONSTRAINT `fkVmHostFileVOVmInstanceVO` FOREIGN KEY (`vmInstanceUuid`) REFERENCES `VmInstanceEO` (`uuid`) ON DELETE CASCADE, - CONSTRAINT `fkVmHostFileVOHostVO` FOREIGN KEY (`hostUuid`) REFERENCES `HostEO` (`uuid`) ON DELETE CASCADE, UNIQUE KEY `ukVmHostFileVO` (`vmInstanceUuid`, `hostUuid`, `type`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -CREATE TABLE IF NOT EXISTS `zstack`.`VmHostFileContentVO` ( +CREATE TABLE IF NOT EXISTS `zstack`.`VmHostBackupFileVO` ( `uuid` char(32) NOT NULL UNIQUE, + `vmInstanceUuid` char(32) NOT NULL, + `type` varchar(64) NOT NULL COMMENT 'NvRam, TpmState', + `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', + PRIMARY KEY (`uuid`), + INDEX `idxVmHostBackupFileVOVmInstanceUuid` (`vmInstanceUuid`), + UNIQUE KEY `ukVmHostBackupFileVO` (`vmInstanceUuid`, `type`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `zstack`.`VmHostFileContentVO` ( + `uuid` char(32) NOT NULL UNIQUE COMMENT 'VmHostFileVO.uuid or VmHostBackupFileVO.uuid', `content` MEDIUMBLOB DEFAULT NULL, `format` varchar(64) NOT NULL COMMENT 'Raw, TarballGzip', `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', PRIMARY KEY (`uuid`), - CONSTRAINT `fkVmHostFileContentVOVmHostFileVO` FOREIGN KEY (`uuid`) REFERENCES `VmHostFileVO` (`uuid`) ON DELETE CASCADE + CONSTRAINT `fkVmHostFileContentVOResourceVO` FOREIGN KEY (`uuid`) REFERENCES `ResourceVO` (`uuid`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- Feature: KMS | ZSPHER-46, ZSPHER-60, ZSPHER-61, ZSPHER-62 diff --git 
a/conf/persistence.xml b/conf/persistence.xml index 0fa065a6a71..8f3b34b367c 100755 --- a/conf/persistence.xml +++ b/conf/persistence.xml @@ -20,6 +20,7 @@ org.zstack.header.managementnode.ManagementNodeContextVO org.zstack.header.tpm.entity.TpmVO org.zstack.header.vm.additions.VmHostFileVO + org.zstack.header.vm.additions.VmHostBackupFileVO org.zstack.header.vm.additions.VmHostFileContentVO org.zstack.header.zone.ZoneVO org.zstack.header.zone.ZoneEO diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostBackupFileVO.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostBackupFileVO.java new file mode 100644 index 00000000000..94662c612e0 --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostBackupFileVO.java @@ -0,0 +1,80 @@ +package org.zstack.header.vm.additions; + +import org.zstack.header.vm.VmInstanceEO; +import org.zstack.header.vo.EntityGraph; +import org.zstack.header.vo.ForeignKey; +import org.zstack.header.vo.ResourceVO; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.Table; +import java.sql.Timestamp; + +/** + * Virtual Machine Host-side File Value Object (Backup files) + * + * Include: NvRam / TpmState files + */ +@Entity +@Table +@EntityGraph( + friends = { + @EntityGraph.Neighbour(type = VmInstanceEO.class, myField = "vmInstanceUuid", targetField = "uuid"), + } +) +public class VmHostBackupFileVO extends ResourceVO { + @Column + @ForeignKey(parentEntityClass = VmInstanceEO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + private String vmInstanceUuid; + @Column + @Enumerated(EnumType.STRING) + private VmHostFileType type; + @Column + private Timestamp createDate; + @Column + private Timestamp lastOpDate; + + public String getVmInstanceUuid() { + return vmInstanceUuid; + } + + public void setVmInstanceUuid(String vmInstanceUuid) { + this.vmInstanceUuid = 
vmInstanceUuid; + } + + public VmHostFileType getType() { + return type; + } + + public void setType(VmHostFileType type) { + this.type = type; + } + + public Timestamp getCreateDate() { + return createDate; + } + + public void setCreateDate(Timestamp createDate) { + this.createDate = createDate; + } + + public Timestamp getLastOpDate() { + return lastOpDate; + } + + public void setLastOpDate(Timestamp lastOpDate) { + this.lastOpDate = lastOpDate; + } + + @Override + public String toString() { + return "VmHostBackupFileVO{" + + "vmInstanceUuid='" + vmInstanceUuid + '\'' + + ", type=" + type + + ", createDate=" + createDate + + ", lastOpDate=" + lastOpDate + + '}'; + } +} diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostBackupFileVO_.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostBackupFileVO_.java new file mode 100644 index 00000000000..e45e804f381 --- /dev/null +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostBackupFileVO_.java @@ -0,0 +1,15 @@ +package org.zstack.header.vm.additions; + +import org.zstack.header.vo.ResourceVO_; + +import javax.persistence.metamodel.SingularAttribute; +import javax.persistence.metamodel.StaticMetamodel; +import java.sql.Timestamp; + +@StaticMetamodel(VmHostBackupFileVO.class) +public class VmHostBackupFileVO_ extends ResourceVO_ { + public static volatile SingularAttribute vmInstanceUuid; + public static volatile SingularAttribute type; + public static volatile SingularAttribute createDate; + public static volatile SingularAttribute lastOpDate; +} diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileType.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileType.java index 16c493e6768..4416821e949 100644 --- a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileType.java +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileType.java @@ -3,6 +3,4 @@ public enum VmHostFileType { NvRam, TpmState, - NvRamBackup, - 
TpmStateBackup, } From 9c35f1fecec1c2148eea45f06ea9e38a9587d7dd Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 12 Mar 2026 14:38:30 +0800 Subject: [PATCH 71/76] [kvm]: support clone for TPM VM * Added support for cross-VM cloning of host files (NvRam, TpmState) and TPM encryption key cloning * KvmSecureBootManager transitioned from a Component to a service * Introducing new message/response types, workflows, and TPM key backend interfaces along with their virtual implementations Resolves: ZSV-11439 Related: ZSV-11310 Change-Id: I796b77716b6c64657469697a7a797a7268636e6e --- .../compute/vm/devices/VmTpmManager.java | 16 + conf/springConfigXml/Kvm.xml | 3 + .../zstack/header/vm/VmInstanceConstant.java | 1 + .../zstack/kvm/efi/CloneVmHostFileMsg.java | 35 +++ .../zstack/kvm/efi/CloneVmHostFileReply.java | 6 + .../kvm/efi/KvmSecureBootExtensions.java | 4 + .../zstack/kvm/efi/KvmSecureBootManager.java | 283 +++++++++++++++++- .../org/zstack/kvm/tpm/CloneVmTpmMsg.java | 35 +++ .../org/zstack/kvm/tpm/CloneVmTpmReply.java | 18 ++ .../DummyTpmEncryptedResourceKeyBackend.java | 17 ++ .../org/zstack/kvm/tpm/KvmTpmManager.java | 110 +++++++ .../tpm/TpmEncryptedResourceKeyBackend.java | 31 ++ .../test/resources/springConfigXml/Kvm.xml | 3 + 13 files changed, 560 insertions(+), 2 deletions(-) create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/efi/CloneVmHostFileMsg.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/efi/CloneVmHostFileReply.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/tpm/CloneVmTpmMsg.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/tpm/CloneVmTpmReply.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/tpm/DummyTpmEncryptedResourceKeyBackend.java create mode 100644 plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmEncryptedResourceKeyBackend.java diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java 
b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java index 19099f290a7..70ba4beced6 100644 --- a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java @@ -3,14 +3,19 @@ import org.springframework.beans.factory.annotation.Autowired; import org.zstack.core.Platform; import org.zstack.core.db.DatabaseFacade; +import org.zstack.core.db.Q; import org.zstack.header.image.ImageBootMode; import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.tpm.entity.TpmVO_; +import org.zstack.resourceconfig.ResourceConfig; import org.zstack.resourceconfig.ResourceConfigFacade; import org.zstack.utils.Utils; import org.zstack.utils.logging.CLogger; import java.util.Objects; +import static org.zstack.compute.vm.VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT; + public class VmTpmManager { private static final CLogger logger = Utils.getLogger(VmTpmManager.class); @@ -40,4 +45,15 @@ public static boolean isUefiBootMode(String bootMode) { return Objects.equals(bootMode, ImageBootMode.UEFI.toString()) || Objects.equals(bootMode, ImageBootMode.UEFI_WITH_CSM.toString()); } + + public boolean needRegisterNvRam(String vmUuid) { + boolean tpmExists = Q.New(TpmVO.class) + .eq(TpmVO_.vmInstanceUuid, vmUuid) + .isExists(); + if (tpmExists) { + return true; + } + ResourceConfig resourceConfig = resourceConfigFacade.getResourceConfig(ENABLE_UEFI_SECURE_BOOT.getIdentity()); + return resourceConfig.getResourceConfigValue(vmUuid, Boolean.class); + } } diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index 88886397a1f..d41d947234f 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -262,6 +262,7 @@ + @@ -271,4 +272,6 @@ + + diff --git a/header/src/main/java/org/zstack/header/vm/VmInstanceConstant.java b/header/src/main/java/org/zstack/header/vm/VmInstanceConstant.java index e767df877f1..43748d46dc1 100755 --- 
a/header/src/main/java/org/zstack/header/vm/VmInstanceConstant.java +++ b/header/src/main/java/org/zstack/header/vm/VmInstanceConstant.java @@ -6,6 +6,7 @@ @PythonClass public interface VmInstanceConstant { String SERVICE_ID = "vmInstance"; + String SECURE_BOOT_SERVICE_ID = "secureBoot"; String ACTION_CATEGORY = "instance"; @PythonClass String USER_VM_TYPE = "UserVm"; diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/CloneVmHostFileMsg.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/CloneVmHostFileMsg.java new file mode 100644 index 00000000000..d28226995e8 --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/CloneVmHostFileMsg.java @@ -0,0 +1,35 @@ +package org.zstack.kvm.efi; + +import org.zstack.header.message.NeedReplyMessage; + +import java.util.List; + +public class CloneVmHostFileMsg extends NeedReplyMessage { + private String srcVmUuid; + private List dstVmUuidList; + private Boolean resetTpm; + + public String getSrcVmUuid() { + return srcVmUuid; + } + + public void setSrcVmUuid(String srcVmUuid) { + this.srcVmUuid = srcVmUuid; + } + + public List getDstVmUuidList() { + return dstVmUuidList; + } + + public void setDstVmUuidList(List dstVmUuidList) { + this.dstVmUuidList = dstVmUuidList; + } + + public Boolean getResetTpm() { + return resetTpm; + } + + public void setResetTpm(Boolean resetTpm) { + this.resetTpm = resetTpm; + } +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/CloneVmHostFileReply.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/CloneVmHostFileReply.java new file mode 100644 index 00000000000..b026831c503 --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/CloneVmHostFileReply.java @@ -0,0 +1,6 @@ +package org.zstack.kvm.efi; + +import org.zstack.header.message.MessageReply; + +public class CloneVmHostFileReply extends MessageReply { +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java 
index 83b8e8720c8..82b2ad202d6 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java @@ -264,6 +264,10 @@ public void success(KvmResponseWrapper wrapper) { content.setLastOpDate(now); databaseFacade.persist(content); } + + if (logger.isTraceEnabled()) { + logger.trace(String.format("persist/update VmHostFileContentVO [uuid=%s]", file.getUuid())); + } } if (errors.isEmpty()) { diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootManager.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootManager.java index e6fc30333cc..1febc137e09 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootManager.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootManager.java @@ -2,34 +2,69 @@ import org.springframework.beans.factory.annotation.Autowired; import org.zstack.compute.legacy.ComputeLegacyGlobalProperty; +import org.zstack.core.Platform; +import org.zstack.core.asyncbatch.While; +import org.zstack.core.cloudbus.CloudBus; import org.zstack.core.cloudbus.EventCallback; import org.zstack.core.cloudbus.EventFacadeImpl; import org.zstack.core.db.Q; -import org.zstack.header.Component; +import org.zstack.core.db.SQLBatch; +import org.zstack.core.workflow.SimpleFlowChain; +import org.zstack.header.AbstractService; import org.zstack.header.core.Completion; +import org.zstack.header.core.WhileDoneCompletion; +import org.zstack.header.core.workflow.FlowDoneHandler; +import org.zstack.header.core.workflow.FlowErrorHandler; +import org.zstack.header.core.workflow.FlowTrigger; +import org.zstack.header.core.workflow.NoRollbackFlow; import org.zstack.header.errorcode.ErrorCode; +import org.zstack.header.errorcode.ErrorCodeList; +import org.zstack.header.exception.CloudRuntimeException; +import org.zstack.header.message.Message; +import org.zstack.header.tpm.entity.TpmVO; +import org.zstack.header.tpm.entity.TpmVO_; 
import org.zstack.header.vm.VmCanonicalEvents; +import org.zstack.header.vm.VmInstanceConstant; import org.zstack.header.vm.VmInstanceVO; import org.zstack.header.vm.VmInstanceVO_; +import org.zstack.header.vm.additions.VmHostBackupFileVO; +import org.zstack.header.vm.additions.VmHostBackupFileVO_; +import org.zstack.header.vm.additions.VmHostFileContentVO; +import org.zstack.header.vm.additions.VmHostFileContentVO_; import org.zstack.header.vm.additions.VmHostFileType; import org.zstack.header.vm.additions.VmHostFileVO; import org.zstack.header.vm.additions.VmHostFileVO_; +import org.zstack.resourceconfig.ResourceConfig; +import org.zstack.resourceconfig.ResourceConfigFacade; +import org.zstack.utils.DebugUtils; import org.zstack.utils.Utils; import org.zstack.utils.logging.CLogger; import javax.persistence.Tuple; +import java.sql.Timestamp; +import java.time.Instant; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.zstack.compute.vm.VmGlobalConfig.ENABLE_UEFI_SECURE_BOOT; +import static org.zstack.compute.vm.VmGlobalConfig.RESET_TPM_AFTER_VM_CLONE; +import static org.zstack.kvm.efi.KvmSecureBootExtensions.*; import static org.zstack.utils.CollectionDSL.list; import static org.zstack.utils.CollectionUtils.findOneOrNull; +import static org.zstack.utils.CollectionUtils.transform; -public class KvmSecureBootManager implements Component { +public class KvmSecureBootManager extends AbstractService { private static final CLogger logger = Utils.getLogger(KvmSecureBootManager.class); + @Autowired + private CloudBus bus; @Autowired private EventFacadeImpl eventFacade; @Autowired + private ResourceConfigFacade resourceConfigFacade; + @Autowired private KvmSecureBootExtensions secureBootExtensions; @Override @@ -101,4 +136,248 @@ public void fail(ErrorCode errorCode) { } }); } + + @Override + public String getId() { + return bus.makeLocalServiceId(VmInstanceConstant.SECURE_BOOT_SERVICE_ID); + } + + 
@Override + public void handleMessage(Message msg) { + if (msg instanceof CloneVmHostFileMsg) { + handle((CloneVmHostFileMsg) msg); + } else { + bus.dealWithUnknownMessage(msg); + } + } + + static class CloneVmHostFileContext { + List typesNeedClone = new ArrayList<>(); + List files = new ArrayList<>(); + List backupFiles = new ArrayList<>(); + List syncContexts = new ArrayList<>(); + } + + @SuppressWarnings("rawtypes") + private void handle(CloneVmHostFileMsg msg) { + CloneVmHostFileReply reply = new CloneVmHostFileReply(); + + boolean hasTpm = Q.New(TpmVO.class) + .eq(TpmVO_.vmInstanceUuid, msg.getSrcVmUuid()) + .isExists(); + ResourceConfig resourceConfig = resourceConfigFacade.getResourceConfig(ENABLE_UEFI_SECURE_BOOT.getIdentity()); + boolean secureBoot = resourceConfig.getResourceConfigValue(msg.getSrcVmUuid(), Boolean.class); + if (!hasTpm && !secureBoot) { + bus.reply(msg, reply); + return; + } + + CloneVmHostFileContext context = new CloneVmHostFileContext(); + context.typesNeedClone.add(VmHostFileType.NvRam); + if (hasTpm) { + boolean resetTpm; + if (msg.getResetTpm() == null) { + resourceConfig = resourceConfigFacade.getResourceConfig(RESET_TPM_AFTER_VM_CLONE.getIdentity()); + resetTpm = resourceConfig.getResourceConfigValue(msg.getSrcVmUuid(), Boolean.class); + } else { + resetTpm = msg.getResetTpm(); + } + if (!resetTpm) { + context.typesNeedClone.add(VmHostFileType.TpmState); + } + } + logger.debug(String.format("clone VM[uuid=%s] host files for types: %s", msg.getSrcVmUuid(), context.typesNeedClone)); + + SimpleFlowChain chain = new SimpleFlowChain(); + chain.setName("clone-vm-host-file"); + chain.then(new NoRollbackFlow() { + String __name__ = "prepare-sync-vm-host-file-context-list"; + + @Override + public void run(FlowTrigger trigger, Map data) { + for (VmHostFileType type : context.typesNeedClone) { + VmHostFileVO file = Q.New(VmHostFileVO.class) + .eq(VmHostFileVO_.vmInstanceUuid, msg.getSrcVmUuid()) + .eq(VmHostFileVO_.type, type) + 
.orderByDesc(VmHostFileVO_.lastOpDate) + .limit(1) + .find(); + if (file == null) { + logger.debug(String.format("skip to read/write %s host file for VM[vmUuid=%s]: file is not registered in MN", + type, msg.getSrcVmUuid())); + continue; + } + context.files.add(file); + } + + if (context.files.isEmpty()) { + trigger.next(); + return; + } + + Map contextMap = new HashMap<>(); + for (VmHostFileVO file : context.files) { + contextMap.computeIfAbsent(file.getHostUuid(), hostUuid -> { + SyncVmHostFilesFromHostContext syncContext = new SyncVmHostFilesFromHostContext(); + syncContext.hostUuid = hostUuid; + syncContext.vmUuid = msg.getSrcVmUuid(); + return syncContext; + }); + } + context.syncContexts.addAll(contextMap.values()); + + for (VmHostFileVO file : context.files) { + SyncVmHostFilesFromHostContext syncContext = contextMap.get(file.getHostUuid()); + if (file.getType() == VmHostFileType.NvRam) { + syncContext.nvRamPath = file.getPath(); + } else if (file.getType() == VmHostFileType.TpmState) { + syncContext.tpmStateFolder = file.getPath(); + } else { + throw new CloudRuntimeException("unsupported vm host file type: " + file.getType()); + } + } + + trigger.next(); + } + }).then(new NoRollbackFlow() { + String __name__ = "read-vm-host-file-from-origin-host"; + + @Override + public boolean skip(Map data) { + return context.syncContexts.isEmpty(); + } + + @Override + public void run(FlowTrigger trigger, Map data) { + new While<>(context.syncContexts).each((syncContext, whileContext) -> + secureBootExtensions.syncVmHostFilesFromHost(syncContext, new Completion(whileContext) { + @Override + public void success() { + whileContext.done(); + } + + @Override + public void fail(ErrorCode errorCode) { + whileContext.addError(errorCode); + whileContext.done(); + } + }) + ).run(new WhileDoneCompletion(trigger) { + @Override + public void done(ErrorCodeList errorCodeList) { + if (!errorCodeList.isEmpty()) { + logger.warn(String.format("failed to sync host file for VM[uuid=%s] but 
still continue:\n%s", + msg.getSrcVmUuid(), + String.join("\n", transform(errorCodeList.getCauses(), ErrorCode::getReadableDetails)))); + } + trigger.next(); + } + }); + } + }).then(new NoRollbackFlow() { + String __name__ = "determine-content-uuid"; + + @Override + public void run(FlowTrigger trigger, Map data) { + List missingTypes = new ArrayList<>(context.typesNeedClone); + missingTypes.removeAll(transform(context.files, VmHostFileVO::getType)); + if (missingTypes.isEmpty()) { + trigger.next(); + return; + } + + context.backupFiles.addAll(Q.New(VmHostBackupFileVO.class) + .eq(VmHostBackupFileVO_.vmInstanceUuid, msg.getSrcVmUuid()) + .in(VmHostFileVO_.type, missingTypes) + .list()); + trigger.next(); + } + }).then(new NoRollbackFlow() { + String __name__ = "copy-host-content-database"; + + @Override + public boolean skip(Map data) { + return context.files.isEmpty() && context.backupFiles.isEmpty(); + } + + @Override + public void run(FlowTrigger trigger, Map data) { + List uuidList = transform(context.files, VmHostFileVO::getUuid); + List filesAfterSyncing = Q.New(VmHostFileVO.class) + .in(VmHostFileVO_.uuid, uuidList) + .list(); + uuidList.addAll(transform(context.backupFiles, VmHostBackupFileVO::getUuid)); + List contents = Q.New(VmHostFileContentVO.class) + .in(VmHostFileContentVO_.uuid, uuidList) + .list(); + + List filesNeedPersists = new ArrayList<>(); + List contentsNeedPersists = new ArrayList<>(); + + Timestamp now = Timestamp.from(Instant.now()); + for (String vmUuid : msg.getDstVmUuidList()) { + for (String uuid : uuidList) { + VmHostFileContentVO srcContent = findOneOrNull(contents, + item -> item.getUuid().equals(uuid)); + if (srcContent == null) { + continue; + } + + VmHostFileVO vmHostFile = findOneOrNull(filesAfterSyncing, + item -> item.getUuid().equals(uuid)); + VmHostBackupFileVO vmHostBackupFile = vmHostFile == null ? 
+ findOneOrNull(context.backupFiles, item -> item.getUuid().equals(uuid)) : null; + DebugUtils.Assert(vmHostFile != null || vmHostBackupFile != null, + "vmHostFile or vmHostBackupFile cannot be null"); + + VmHostBackupFileVO file = new VmHostBackupFileVO(); + file.setUuid(Platform.getUuid()); + file.setVmInstanceUuid(vmUuid); + file.setType(vmHostFile == null ? vmHostBackupFile.getType() : vmHostFile.getType()); + file.setCreateDate(now); + file.setLastOpDate(now); + filesNeedPersists.add(file); + + VmHostFileContentVO content = new VmHostFileContentVO(); + content.setUuid(file.getUuid()); + content.setContent(srcContent.getContent()); + content.setFormat(srcContent.getFormat()); + content.setCreateDate(now); + content.setLastOpDate(now); + contentsNeedPersists.add(content); + } + } + + if (logger.isTraceEnabled()) { + logger.trace(String.format("persist VmHostFileContentVO [uuid=%s]", + transform(contentsNeedPersists, VmHostFileContentVO::getUuid))); + } + + new SQLBatch() { + @Override + protected void scripts() { + if (!filesNeedPersists.isEmpty()) { + databaseFacade.persistCollection(filesNeedPersists); + } + if (!contentsNeedPersists.isEmpty()) { + databaseFacade.persistCollection(contentsNeedPersists); + } + } + }.execute(); + + trigger.next(); + } + }).done(new FlowDoneHandler(msg) { + @Override + public void handle(Map data) { + bus.reply(msg, reply); + } + }).error(new FlowErrorHandler(msg) { + @Override + public void handle(ErrorCode errCode, Map data) { + reply.setError(errCode); + bus.reply(msg, reply); + } + }).start(); + } } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/CloneVmTpmMsg.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/CloneVmTpmMsg.java new file mode 100644 index 00000000000..69f0d6e5e16 --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/CloneVmTpmMsg.java @@ -0,0 +1,35 @@ +package org.zstack.kvm.tpm; + +import org.zstack.header.message.NeedReplyMessage; + +import java.util.List; + +public class CloneVmTpmMsg 
extends NeedReplyMessage { + private String srcVmUuid; + private List dstVmUuidList; + private Boolean resetTpm; + + public String getSrcVmUuid() { + return srcVmUuid; + } + + public void setSrcVmUuid(String srcVmUuid) { + this.srcVmUuid = srcVmUuid; + } + + public List getDstVmUuidList() { + return dstVmUuidList; + } + + public void setDstVmUuidList(List dstVmUuidList) { + this.dstVmUuidList = dstVmUuidList; + } + + public Boolean getResetTpm() { + return resetTpm; + } + + public void setResetTpm(Boolean resetTpm) { + this.resetTpm = resetTpm; + } +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/CloneVmTpmReply.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/CloneVmTpmReply.java new file mode 100644 index 00000000000..b2ad460b8f2 --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/CloneVmTpmReply.java @@ -0,0 +1,18 @@ +package org.zstack.kvm.tpm; + +import org.zstack.header.message.MessageReply; +import org.zstack.header.tpm.entity.TpmInventory; + +import java.util.List; + +public class CloneVmTpmReply extends MessageReply { + private List inventories; + + public List getInventories() { + return inventories; + } + + public void setInventories(List inventories) { + this.inventories = inventories; + } +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/DummyTpmEncryptedResourceKeyBackend.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/DummyTpmEncryptedResourceKeyBackend.java new file mode 100644 index 00000000000..a2ada84b8ae --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/DummyTpmEncryptedResourceKeyBackend.java @@ -0,0 +1,17 @@ +package org.zstack.kvm.tpm; + +import org.zstack.header.core.Completion; +import org.zstack.utils.Utils; +import org.zstack.utils.logging.CLogger; + +public class DummyTpmEncryptedResourceKeyBackend implements TpmEncryptedResourceKeyBackend { + private static final CLogger logger = Utils.getLogger(DummyTpmEncryptedResourceKeyBackend.class); + + @Override + public void 
cloneEncryptedResourceKey(CloneEncryptedResourceKeyContext context, Completion completion) { + // do nothing + logger.debug("ignore clone encrypted resource key request for TPM uuid " + + context.srcTpmUuid + " -> " + context.dstTpmUuid); + completion.success(); + } +} diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java index 4eb436a4b72..781faf4c186 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java @@ -2,6 +2,7 @@ import org.springframework.beans.factory.annotation.Autowired; import org.zstack.compute.vm.devices.VmTpmManager; +import org.zstack.core.asyncbatch.While; import org.zstack.core.cloudbus.CloudBus; import org.zstack.core.cloudbus.CloudBusCallBack; import org.zstack.core.cloudbus.MessageSafe; @@ -13,11 +14,15 @@ import org.zstack.core.workflow.SimpleFlowChain; import org.zstack.header.AbstractService; import org.zstack.header.core.Completion; +import org.zstack.header.core.WhileDoneCompletion; +import org.zstack.header.core.workflow.Flow; import org.zstack.header.core.workflow.FlowDoneHandler; import org.zstack.header.core.workflow.FlowErrorHandler; +import org.zstack.header.core.workflow.FlowRollback; import org.zstack.header.core.workflow.FlowTrigger; import org.zstack.header.core.workflow.NoRollbackFlow; import org.zstack.header.errorcode.ErrorCode; +import org.zstack.header.errorcode.ErrorCodeList; import org.zstack.header.message.APIMessage; import org.zstack.header.message.Message; import org.zstack.header.message.MessageReply; @@ -44,14 +49,17 @@ import org.zstack.header.vm.additions.VmHostFileVO_; import org.zstack.resourceconfig.ResourceConfig; import org.zstack.resourceconfig.ResourceConfigFacade; +import org.zstack.utils.CollectionUtils; import org.zstack.utils.Utils; import org.zstack.utils.logging.CLogger; +import java.util.ArrayList; import java.util.List; 
import java.util.Map; import static org.zstack.compute.vm.VmGlobalConfig.RESET_TPM_AFTER_VM_CLONE; import static org.zstack.core.Platform.err; +import static org.zstack.core.Platform.operr; import static org.zstack.header.errorcode.SysErrors.NOT_SUPPORTED; import static org.zstack.header.tpm.TpmConstants.*; import static org.zstack.header.tpm.TpmErrors.VM_STATE_ERROR; @@ -60,6 +68,7 @@ import static org.zstack.kvm.KVMSystemTags.SWTPM_VERSION_TOKEN; import static org.zstack.kvm.KVMSystemTags.VM_EDK; import static org.zstack.utils.CollectionDSL.list; +import static org.zstack.utils.CollectionUtils.transform; public class KvmTpmManager extends AbstractService { private static final CLogger logger = Utils.getLogger(KvmTpmManager.class); @@ -72,6 +81,8 @@ public class KvmTpmManager extends AbstractService { private ResourceConfigFacade resourceConfigFacade; @Autowired private VmTpmManager vmTpmManager; + @Autowired + private TpmEncryptedResourceKeyBackend tpmKeyBackend; @Override public boolean start() { @@ -106,6 +117,8 @@ private void handleLocalMessage(Message msg) { handle((AddTpmMsg) msg); } else if (msg instanceof RemoveTpmMsg) { handle((RemoveTpmMsg) msg); + } else if (msg instanceof CloneVmTpmMsg) { + handle((CloneVmTpmMsg) msg); } else { bus.dealWithUnknownMessage(msg); } @@ -309,6 +322,103 @@ public void handle(ErrorCode errorCode, Map data) { }).start(); } + @SuppressWarnings("rawtypes") + private void handle(CloneVmTpmMsg msg) { + CloneVmTpmReply reply = new CloneVmTpmReply(); + + String originTpmUuid = Q.New(TpmVO.class) + .eq(TpmVO_.vmInstanceUuid, msg.getSrcVmUuid()) + .select(TpmVO_.uuid) + .findValue(); + if (originTpmUuid == null) { + bus.reply(msg, reply); + return; + } + + SimpleFlowChain chain = new SimpleFlowChain(); + chain.setName("clone-VM-TPM"); + chain.then(new Flow() { + String __name__ = "persist-TPM-VO"; + + @Override + public void run(FlowTrigger trigger, Map data) { + reply.setInventories(new ArrayList<>()); + for (String dstVmUuid : 
msg.getDstVmUuidList()) { + TpmVO dstTpm = vmTpmManager.persistTpmVO(null, dstVmUuid); + reply.getInventories().add(TpmInventory.valueOf(dstTpm)); + } + trigger.next(); + } + + @Override + public void rollback(FlowRollback trigger, Map data) { + if (CollectionUtils.isEmpty(reply.getInventories())) { + trigger.rollback(); + return; + } + SQL.New(TpmVO.class) + .in(TpmVO_.uuid, transform(reply.getInventories(), TpmInventory::getUuid)) + .delete(); + trigger.rollback(); + } + }).then(new NoRollbackFlow() { + String __name__ = "clone-encrypted-resource-key-if-needed"; + + @Override + public void run(FlowTrigger trigger, Map data) { + boolean resetTpm; + if (msg.getResetTpm() == null) { + ResourceConfig resourceConfig = resourceConfigFacade.getResourceConfig(RESET_TPM_AFTER_VM_CLONE.getIdentity()); + resetTpm = resourceConfig.getResourceConfigValue(msg.getSrcVmUuid(), Boolean.class); + } else { + resetTpm = msg.getResetTpm(); + } + + new While<>(reply.getInventories()).each((inventory, whileCompletion) -> { + TpmEncryptedResourceKeyBackend.CloneEncryptedResourceKeyContext context = + new TpmEncryptedResourceKeyBackend.CloneEncryptedResourceKeyContext(); + context.srcTpmUuid = originTpmUuid; + context.dstTpmUuid = inventory.getUuid(); + context.resetTpm = resetTpm; + tpmKeyBackend.cloneEncryptedResourceKey(context, new Completion(whileCompletion) { + @Override + public void success() { + whileCompletion.done(); + } + + @Override + public void fail(ErrorCode errorCode) { + whileCompletion.addError(errorCode); + whileCompletion.allDone(); + } + }); + }).run(new WhileDoneCompletion(trigger) { + @Override + public void done(ErrorCodeList errorCodeList) { + if (errorCodeList.isEmpty()) { + trigger.next(); + return; + } + trigger.fail(operr("Failed to clone encrypted resource key") + .withOpaque("src.tpm.uuid", originTpmUuid) + .withCause(errorCodeList)); + } + }); + } + }).done(new FlowDoneHandler(msg) { + @Override + public void handle(Map data) { + bus.reply(msg, reply); + 
} + }).error(new FlowErrorHandler(msg) { + @Override + public void handle(ErrorCode errCode, Map data) { + reply.setError(errCode); + bus.reply(msg, reply); + } + }).start(); + } + private void handle(APIGetTpmCapabilityMsg msg) { TpmCapabilityView view = new TpmCapabilityView(); diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmEncryptedResourceKeyBackend.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmEncryptedResourceKeyBackend.java new file mode 100644 index 00000000000..4b34eb157fc --- /dev/null +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmEncryptedResourceKeyBackend.java @@ -0,0 +1,31 @@ +package org.zstack.kvm.tpm; + +import org.zstack.header.core.Completion; + +/** + * Responsible for handling the replication or reset of encryption resource keys + * and other tasks in VM TPM cloning scenarios. + */ +public interface TpmEncryptedResourceKeyBackend { + static class CloneEncryptedResourceKeyContext { + public String srcTpmUuid; + public String dstTpmUuid; + + /** + * Whether to reset (regenerate) the key on the target TPM. + *
    + * <ul>
    + *   <li>{@code true}: Regenerate the key for the target TPM
    + *       without inheriting the encrypted data from the source TPM.</li>
    + *   <li>{@code false}: Copy the existing keys from the source TPM
    + *       to the target TPM to ensure they remain consistent.</li>
    + * </ul>
+ */ + public boolean resetTpm; + } + + /** + * In a VM cloning scenario, copy or reset the encryption resource key + * from the source TPM to the target TPM. + */ + void cloneEncryptedResourceKey(CloneEncryptedResourceKeyContext context, Completion completion); +} diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index 53cf63d951b..a1b8520ffca 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -261,6 +261,7 @@ + @@ -270,4 +271,6 @@
+ + From 54499454bd84994320a787ada2a651bd22b797a3 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 12 Mar 2026 16:35:10 +0800 Subject: [PATCH 72/76] [kvm]: support to start VM with VmHostBackupFileVO Resolves: ZSV-11439 Related: ZSV-11310 Change-Id: I626c6775707a67657570736778766d6b75736570 --- .../kvm/efi/KvmSecureBootExtensions.java | 57 +++++++++++++++---- 1 file changed, 46 insertions(+), 11 deletions(-) diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java index 82b2ad202d6..8da8b88b333 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java @@ -28,6 +28,8 @@ import org.zstack.header.vm.PreVmInstantiateResourceExtensionPoint; import org.zstack.header.vm.VmInstanceSpec; import org.zstack.header.vm.VmInstantiateResourceException; +import org.zstack.header.vm.additions.VmHostBackupFileVO; +import org.zstack.header.vm.additions.VmHostBackupFileVO_; import org.zstack.header.vm.additions.VmHostFileContentFormat; import org.zstack.header.vm.additions.VmHostFileContentVO; import org.zstack.header.vm.additions.VmHostFileContentVO_; @@ -414,11 +416,15 @@ public static class PrepareHostFileContext { public String vmUuid; public VmHostFileType type; + public String path; // whether the NvRam is on the same host as before private boolean sameHost = false; - private boolean firstReadSuccess = false; private boolean writeSuccess = false; private VmHostFileVO vmHostFile; + private VmHostBackupFileVO vmBackupFileVO; + + // property: VmHostFileVO (read success) > VmHostFileVO (read fail) > VmHostBackupFileVO + // Note: read VmHostBackupFileVO only if VmHostFileVO is not exist } @SuppressWarnings("rawtypes") @@ -430,7 +436,7 @@ public void prepareHostFileOnHost(PrepareHostFileContext context, Completion com @Override public void run(FlowTrigger trigger, Map 
data) { - VmHostFileVO vmHostFile = context.vmHostFile = Q.New(VmHostFileVO.class) + VmHostFileVO vmHostFile = Q.New(VmHostFileVO.class) .eq(VmHostFileVO_.type, context.type) .eq(VmHostFileVO_.vmInstanceUuid, context.vmUuid) .orderByDesc(VmHostFileVO_.lastOpDate) @@ -449,9 +455,9 @@ public void run(FlowTrigger trigger, Map data) { syncContext.vmUuid = context.vmUuid; if (vmHostFile.getType() == VmHostFileType.NvRam) { - syncContext.nvRamPath = vmHostFile.getPath(); + context.path = syncContext.nvRamPath = vmHostFile.getPath(); } else if (vmHostFile.getType() == VmHostFileType.TpmState) { - syncContext.tpmStateFolder = vmHostFile.getPath(); + context.path = syncContext.tpmStateFolder = vmHostFile.getPath(); } else { throw new CloudRuntimeException("unsupported vm host file type: " + vmHostFile.getType()); } @@ -459,7 +465,7 @@ public void run(FlowTrigger trigger, Map data) { syncVmHostFilesFromHost(syncContext, new Completion(trigger) { @Override public void success() { - context.firstReadSuccess = true; + context.vmHostFile = vmHostFile; trigger.next(); } @@ -471,18 +477,47 @@ public void fail(ErrorCode errorCode) { } }); } + }).then(new NoRollbackFlow() { + String __name__ = "read-vm-host-file-from-backup"; + + @Override + public boolean skip(Map data) { + return context.vmHostFile != null; + } + + @Override + public void run(FlowTrigger trigger, Map data) { + context.vmBackupFileVO = Q.New(VmHostBackupFileVO.class) + .eq(VmHostBackupFileVO_.type, context.type) + .eq(VmHostBackupFileVO_.vmInstanceUuid, context.vmUuid) + .orderByDesc(VmHostBackupFileVO_.lastOpDate) + .limit(1) + .find(); + if (context.vmBackupFileVO != null) { + logger.debug(String.format("use %s[type=%s] VM-host backup file for VM[uuid=%s]", + context.vmBackupFileVO.getUuid(), context.type, context.vmUuid)); + switch (context.type) { + case NvRam: context.path = buildNvramFilePath(context.vmUuid); break; + case TpmState: context.path = buildTpmStateFilePath(context.vmUuid); break; + } + } + 
trigger.next(); + } }).then(new NoRollbackFlow() { String __name__ = "write-vm-host-file-to-dest-host"; @Override public boolean skip(Map data) { - return context.vmHostFile == null || (context.sameHost && context.firstReadSuccess); + return (context.vmHostFile == null && context.vmBackupFileVO == null) + || (context.sameHost && context.vmHostFile != null); } @Override public void run(FlowTrigger trigger, Map data) { + String contentUuid = context.vmHostFile == null ? + context.vmBackupFileVO.getUuid() : context.vmHostFile.getUuid(); VmHostFileContentVO content = Q.New(VmHostFileContentVO.class) - .eq(VmHostFileContentVO_.uuid, context.vmHostFile.getUuid()) + .eq(VmHostFileContentVO_.uuid, contentUuid) .find(); if (content == null) { logger.debug(String.format("skip to write vm host file for VM[vmUuid=%s]: file content is not saved in MN", @@ -492,8 +527,8 @@ public void run(FlowTrigger trigger, Map data) { } VmHostFileTO to = new VmHostFileTO(); - to.setPath(context.vmHostFile.getPath()); - to.setType(context.vmHostFile.getType().toString()); + to.setPath(context.path); + to.setType(context.type.toString()); to.setFileFormat(content.getFormat().toString()); String contentBase64 = Base64.getEncoder().encodeToString(content.getContent()); @@ -532,9 +567,9 @@ public void run(FlowTrigger trigger, Map data) { syncBackContext.vmUuid = context.vmUuid; if (context.type == VmHostFileType.NvRam) { - syncBackContext.nvRamPath = context.vmHostFile.getPath(); + syncBackContext.nvRamPath = context.path; } else if (context.type == VmHostFileType.TpmState) { - syncBackContext.tpmStateFolder = context.vmHostFile.getPath(); + syncBackContext.tpmStateFolder = context.path; } syncVmHostFilesFromHost(syncBackContext, new Completion(trigger) { From cf37a84dceb69bdcc92d276a134ba27ad1f6fb35 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Thu, 12 Mar 2026 21:48:35 +0800 Subject: [PATCH 73/76] [kvm]: clean VmHostFileVO after VM destroy Resolves: ZSV-11439 Related: ZSV-11310 Change-Id: 
I68696b7776656f78677679686370707a70616665 --- .../zstack/compute/vm/VmCascadeExtension.java | 6 ++-- conf/springConfigXml/Kvm.xml | 1 + .../kvm/efi/KvmSecureBootExtensions.java | 31 ++++++++++++++++++- .../test/resources/springConfigXml/Kvm.xml | 1 + 4 files changed, 35 insertions(+), 4 deletions(-) diff --git a/compute/src/main/java/org/zstack/compute/vm/VmCascadeExtension.java b/compute/src/main/java/org/zstack/compute/vm/VmCascadeExtension.java index 05810e8bf53..dabbc36a653 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmCascadeExtension.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmCascadeExtension.java @@ -472,9 +472,9 @@ public void done(ErrorCodeList errorCodeList) { .flatMap(List::stream).map(VmCdRomInventory::getUuid) .collect(Collectors.toList()); dbf.removeByPrimaryKeys(cdRomUuids, VmCdRomVO.class); - dbf.removeByPrimaryKeys(vminvs.stream().map(p -> p.getInventory().getUuid()) - .collect(Collectors.toList()), - VmInstanceVO.class); + + List vmUuidList = transform(vminvs, p -> p.getInventory().getUuid()); + dbf.removeByPrimaryKeys(vmUuidList, VmInstanceVO.class); } completion.success(); diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index d41d947234f..7d4e03f86b2 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -270,6 +270,7 @@ + diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java index 8da8b88b333..9b747956163 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/efi/KvmSecureBootExtensions.java @@ -26,6 +26,8 @@ import org.zstack.header.message.MessageReply; import org.zstack.header.vm.DiskAO; import org.zstack.header.vm.PreVmInstantiateResourceExtensionPoint; +import org.zstack.header.vm.VmInstanceDestroyExtensionPoint; +import org.zstack.header.vm.VmInstanceInventory; import 
org.zstack.header.vm.VmInstanceSpec; import org.zstack.header.vm.VmInstantiateResourceException; import org.zstack.header.vm.additions.VmHostBackupFileVO; @@ -76,7 +78,8 @@ import static org.zstack.utils.CollectionUtils.transform; public class KvmSecureBootExtensions implements KVMStartVmExtensionPoint, - PreVmInstantiateResourceExtensionPoint { + PreVmInstantiateResourceExtensionPoint, + VmInstanceDestroyExtensionPoint { private static final CLogger logger = Utils.getLogger(KvmSecureBootExtensions.class); @Autowired @@ -750,4 +753,30 @@ public void run(MessageReply reply) { } }); } + + @Override + public String preDestroyVm(VmInstanceInventory inv) { + return null; + } + + @Override + public void beforeDestroyVm(VmInstanceInventory inv) { + // do-nothing + } + + @Override + public void afterDestroyVm(VmInstanceInventory inv) { + String vmUuid = inv.getUuid(); + SQL.New(VmHostFileVO.class) + .eq(VmHostFileVO_.vmInstanceUuid, vmUuid) + .delete(); + SQL.New(VmHostBackupFileVO.class) + .eq(VmHostBackupFileVO_.vmInstanceUuid, vmUuid) + .delete(); + } + + @Override + public void failedToDestroyVm(VmInstanceInventory inv, ErrorCode reason) { + // do-nothing + } } diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index a1b8520ffca..730e9c79cae 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -269,6 +269,7 @@ +
From f1943f71afd86f3ca48e16115a8f22c8425368ce Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Mon, 16 Mar 2026 13:37:08 +0800 Subject: [PATCH 74/76] [header]: fix reference error in VmHostFileContentVO Related: ZSV-11439 Related: ZSV-11310 Change-Id: I65616264756468686477647a70716f72626f6365 --- .../header/vm/additions/VmHostFileContentVO.java | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO.java b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO.java index 4995d5b0f38..e08e5a41d70 100644 --- a/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO.java +++ b/header/src/main/java/org/zstack/header/vm/additions/VmHostFileContentVO.java @@ -1,7 +1,7 @@ package org.zstack.header.vm.additions; -import org.zstack.header.vo.EntityGraph; import org.zstack.header.vo.ForeignKey; +import org.zstack.header.vo.ResourceVO; import org.zstack.header.vo.SoftDeletionCascade; import org.zstack.header.vo.SoftDeletionCascades; @@ -19,17 +19,12 @@ @Entity @Table @SoftDeletionCascades({ - @SoftDeletionCascade(parent = VmHostFileVO.class, joinColumn = "uuid"), + @SoftDeletionCascade(parent = ResourceVO.class, joinColumn = "uuid"), }) -@EntityGraph( - friends = { - @EntityGraph.Neighbour(type = VmHostFileVO.class, myField = "uuid", targetField = "uuid"), - } -) public class VmHostFileContentVO { @Id @Column - @ForeignKey(parentEntityClass = VmHostFileVO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) + @ForeignKey(parentEntityClass = ResourceVO.class, onDeleteAction = ForeignKey.ReferenceOption.CASCADE) private String uuid; @Column private byte[] content; From 4ce7c2fcef852b2ad1dac9c8ee31fca415c448ce Mon Sep 17 00:00:00 2001 From: "tao.yang" Date: Fri, 13 Mar 2026 18:41:11 +0800 Subject: [PATCH 75/76] [keyProvider]: reset default key provider after delete DBImpact Resolves: ZSV-11473 Change-Id: I676766646c6568687472776f6c776c6f72777279 
--- conf/db/zsv/V5.0.0__schema.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/db/zsv/V5.0.0__schema.sql b/conf/db/zsv/V5.0.0__schema.sql index 64eb64b56b0..69ff903049c 100644 --- a/conf/db/zsv/V5.0.0__schema.sql +++ b/conf/db/zsv/V5.0.0__schema.sql @@ -122,7 +122,7 @@ CREATE TABLE IF NOT EXISTS `zstack`.`EncryptedResourceKeyRefVO` ( `lastOpDate` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `createDate` timestamp NOT NULL DEFAULT '1999-12-31 23:59:59', PRIMARY KEY (`id`), - INDEX `idxEncryptedResourceKeyRefVOResource` (`resourceUuid`, `resourceType`), + INDEX `idxEncryptedResourceKeyRefVOResource` (`resourceType`, `resourceUuid`), INDEX `idxEncryptedResourceKeyRefVOProviderUuid` (`providerUuid`), INDEX `idxEncryptedResourceKeyRefVOProviderName` (`providerName`), CONSTRAINT `fkEncryptedResourceKeyRefVOProviderUuid` FOREIGN KEY (`providerUuid`) REFERENCES `KeyProviderVO` (`uuid`) ON DELETE SET NULL From f049511c2d160f8d2a6b651768d0cc6f54344608 Mon Sep 17 00:00:00 2001 From: Zhang Wenhao Date: Mon, 16 Mar 2026 17:52:26 +0800 Subject: [PATCH 76/76] [compute]: persist resource key when creating VM * A new hook (afterRollbackPersistVmInstanceVO) has been added to the VM rollback path * introduce the TPM key provider management interface and implementation (attach/detach/find) have been introduced and extended Resolves: ZSV-11489 Related: ZSV-11310 Change-Id: I73646c617971626d77726d6f646467746f637075 --- .../compute/vm/VmInstanceManagerImpl.java | 16 ++- .../DummyTpmEncryptedResourceKeyBackend.java | 18 ++- .../TpmEncryptedResourceKeyBackend.java | 20 +++- .../compute/vm/devices/VmTpmExtensions.java | 46 +++++++- .../compute/vm/devices/VmTpmManager.java | 4 + conf/errorCodes/keyProvider.xml | 108 ++++++++++++++++++ conf/springConfigXml/Kvm.xml | 2 - conf/springConfigXml/VmInstanceManager.xml | 28 ++--- .../vm/VmInstanceCreateExtensionPoint.java | 7 ++ .../org/zstack/kvm/tpm/KvmTpmManager.java | 1 + 
.../test/resources/springConfigXml/Kvm.xml | 2 - 11 files changed, 223 insertions(+), 29 deletions(-) rename {plugin/kvm/src/main/java/org/zstack/kvm/tpm => compute/src/main/java/org/zstack/compute/vm/devices}/DummyTpmEncryptedResourceKeyBackend.java (53%) rename {plugin/kvm/src/main/java/org/zstack/kvm/tpm => compute/src/main/java/org/zstack/compute/vm/devices}/TpmEncryptedResourceKeyBackend.java (64%) create mode 100644 conf/errorCodes/keyProvider.xml diff --git a/compute/src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java b/compute/src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java index d3a6d6396b2..418cd0fbd5b 100755 --- a/compute/src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java +++ b/compute/src/main/java/org/zstack/compute/vm/VmInstanceManagerImpl.java @@ -1214,16 +1214,22 @@ public void setup() { flow(new Flow() { String __name__ = "call-after-persist-vm-extensions"; + List done = new ArrayList<>(); + @Override public void run(FlowTrigger trigger, Map data) { - pluginRgty.getExtensionList(VmInstanceCreateExtensionPoint.class).forEach( - extensionPoint -> extensionPoint.afterPersistVmInstanceVO(finalVo, msg)); + for (VmInstanceCreateExtensionPoint extension : pluginRgty.getExtensionList(VmInstanceCreateExtensionPoint.class)) { + done.add(extension); + extension.afterPersistVmInstanceVO(finalVo, msg); + } trigger.next(); } @Override public void rollback(FlowRollback trigger, Map data) { - // do nothing + Collections.reverse(done); + CollectionUtils.safeForEach(done, + extension -> extension.afterRollbackPersistVmInstanceVO(finalVo, msg)); trigger.rollback(); } }); @@ -1315,7 +1321,7 @@ public void run(FlowTrigger trigger, Map data) { smsg.setRootDiskOfferingUuid(rootDisk.getDiskOfferingUuid()); } else if (rootDisk.getSize() > 0) { dvo = new DiskOfferingVO(); - dvo.setUuid(Platform.getUuid()); + dvo.setUuid(getUuid()); dvo.setAccountUuid(msg.getAccountUuid()); dvo.setDiskSize(rootDisk.getSize()); 
dvo.setName("for-create-vm-" + finalVo.getUuid()); @@ -1381,7 +1387,7 @@ public void rollback(FlowRollback chain, Map data) { } DestroyVmInstanceMsg dmsg = new DestroyVmInstanceMsg(); dmsg.setVmInstanceUuid(finalVo.getUuid()); - dmsg.setDeletionPolicy(VmInstanceDeletionPolicyManager.VmInstanceDeletionPolicy.Direct); + dmsg.setDeletionPolicy(VmInstanceDeletionPolicy.Direct); bus.makeTargetServiceIdByResourceUuid(dmsg, VmInstanceConstant.SERVICE_ID, finalVo.getUuid()); bus.send(dmsg, new CloudBusCallBack(null) { @Override diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/DummyTpmEncryptedResourceKeyBackend.java b/compute/src/main/java/org/zstack/compute/vm/devices/DummyTpmEncryptedResourceKeyBackend.java similarity index 53% rename from plugin/kvm/src/main/java/org/zstack/kvm/tpm/DummyTpmEncryptedResourceKeyBackend.java rename to compute/src/main/java/org/zstack/compute/vm/devices/DummyTpmEncryptedResourceKeyBackend.java index a2ada84b8ae..911a78809a4 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/DummyTpmEncryptedResourceKeyBackend.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/DummyTpmEncryptedResourceKeyBackend.java @@ -1,4 +1,4 @@ -package org.zstack.kvm.tpm; +package org.zstack.compute.vm.devices; import org.zstack.header.core.Completion; import org.zstack.utils.Utils; @@ -7,6 +7,22 @@ public class DummyTpmEncryptedResourceKeyBackend implements TpmEncryptedResourceKeyBackend { private static final CLogger logger = Utils.getLogger(DummyTpmEncryptedResourceKeyBackend.class); + @Override + public void attachKeyProviderToTpm(String tpmUuid, String keyProviderUuid) { + logger.debug("ignore attach key provider to TPM request for TPM uuid " + tpmUuid + + " and key provider uuid " + keyProviderUuid); + } + + @Override + public void detachKeyProviderFromTpm(String tpmUuid) { + logger.debug("ignore detach key provider from TPM request for TPM uuid " + tpmUuid); + } + + @Override + public String findKeyProviderUuidByTpm(String tpmUuid) { + 
return null; + } + @Override public void cloneEncryptedResourceKey(CloneEncryptedResourceKeyContext context, Completion completion) { // do nothing diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmEncryptedResourceKeyBackend.java b/compute/src/main/java/org/zstack/compute/vm/devices/TpmEncryptedResourceKeyBackend.java similarity index 64% rename from plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmEncryptedResourceKeyBackend.java rename to compute/src/main/java/org/zstack/compute/vm/devices/TpmEncryptedResourceKeyBackend.java index 4b34eb157fc..cc043bd36ae 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/TpmEncryptedResourceKeyBackend.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/TpmEncryptedResourceKeyBackend.java @@ -1,4 +1,4 @@ -package org.zstack.kvm.tpm; +package org.zstack.compute.vm.devices; import org.zstack.header.core.Completion; @@ -7,6 +7,24 @@ * and other tasks in VM TPM cloning scenarios. */ public interface TpmEncryptedResourceKeyBackend { + + /** + * Build relationship from {@link org.zstack.header.tpm.entity.TpmVO} to EncryptedResourceKeyRefVO + * Non-async call. + */ + void attachKeyProviderToTpm(String tpmUuid, String keyProviderUuid); + + /** + * Clean relationship from {@link org.zstack.header.tpm.entity.TpmVO} to EncryptedResourceKeyRefVO + * Non-async call. 
+ */ + void detachKeyProviderFromTpm(String tpmUuid); + + /** + * maybe null (when crypto module is not installed) + */ + String findKeyProviderUuidByTpm(String tpmUuid); + static class CloneEncryptedResourceKeyContext { public String srcTpmUuid; public String dstTpmUuid; diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java index 39ce6fcb840..775fa9d6f16 100644 --- a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmExtensions.java @@ -4,6 +4,7 @@ import org.zstack.compute.vm.BuildVmSpecExtensionPoint; import org.zstack.compute.vm.VmSystemTags; import org.zstack.core.db.Q; +import org.zstack.core.db.SQLBatch; import org.zstack.header.tpm.entity.TpmSpec; import org.zstack.header.tpm.entity.TpmVO; import org.zstack.header.tpm.entity.TpmVO_; @@ -25,6 +26,8 @@ public class VmTpmExtensions implements VmInstanceCreateExtensionPoint, private VmTpmManager vmTpmManager; @Autowired private ResourceConfigFacade resourceConfigFacade; + @Autowired + private TpmEncryptedResourceKeyBackend resourceKeyBackend; @Override public void preCreateVmInstance(CreateVmInstanceMsg msg) { @@ -38,17 +41,49 @@ public void afterPersistVmInstanceVO(VmInstanceVO vo, CreateVmInstanceMsg msg) { return; } - vmTpmManager.persistTpmVO(null, vo.getUuid()); + new SQLBatch() { + @Override + protected void scripts() { + final TpmVO tpm = vmTpmManager.persistTpmVO(null, vo.getUuid()); + final String keyProviderUuid = spec.getTpm().getKeyProviderUuid(); + if (keyProviderUuid != null) { + resourceKeyBackend.attachKeyProviderToTpm(tpm.getUuid(), keyProviderUuid); + } + } + }.execute(); + } + + @Override + public void afterRollbackPersistVmInstanceVO(VmInstanceVO vo, CreateVmInstanceMsg msg) { + String tpmUuid = Q.New(TpmVO.class) + .eq(TpmVO_.vmInstanceUuid, vo.getUuid()) + .select(TpmVO_.uuid) + .findValue(); + if 
(tpmUuid == null) { + return; + } + + new SQLBatch() { + @Override + protected void scripts() { + try { + resourceKeyBackend.detachKeyProviderFromTpm(tpmUuid); + } finally { + vmTpmManager.deleteTpmVO(tpmUuid); + } + } + }.execute(); } @Override public void afterBuildVmSpec(VmInstanceSpec spec) { String vmUuid = spec.getVmInventory().getUuid(); - boolean tpmExists = Q.New(TpmVO.class) + String tpmUuid = Q.New(TpmVO.class) .eq(TpmVO_.vmInstanceUuid, vmUuid) - .isExists(); - boolean needRegisterNvRam = tpmExists; + .select(TpmVO_.uuid) + .findValue(); + boolean needRegisterNvRam = tpmUuid != null; if (!needRegisterNvRam) { String bootMode = VmSystemTags.BOOT_MODE.getTokenByResourceUuid(vmUuid, VmSystemTags.BOOT_MODE_TOKEN); if (vmTpmManager.isUefiBootMode(bootMode)) { @@ -64,12 +99,13 @@ public void afterBuildVmSpec(VmInstanceSpec spec) { spec.setNvRamSpec(nvRamSpec); } - if (tpmExists && (spec.getDevicesSpec() == null || spec.getDevicesSpec().getTpm() == null)) { + if (tpmUuid != null && (spec.getDevicesSpec() == null || spec.getDevicesSpec().getTpm() == null)) { VmDevicesSpec devicesSpec = spec.getDevicesSpec() == null ? 
new VmDevicesSpec() : spec.getDevicesSpec(); spec.setDevicesSpec(devicesSpec); devicesSpec.setTpm(new TpmSpec()); devicesSpec.getTpm().setEnable(true); + devicesSpec.getTpm().setKeyProviderUuid(resourceKeyBackend.findKeyProviderUuidByTpm(tpmUuid)); } } } diff --git a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java index 70ba4beced6..7345e0e49d2 100644 --- a/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java +++ b/compute/src/main/java/org/zstack/compute/vm/devices/VmTpmManager.java @@ -38,6 +38,10 @@ public TpmVO persistTpmVO(String tpmUuid, String vmUuid) { return tpm; } + public void deleteTpmVO(String tpmUuid) { + databaseFacade.removeByPrimaryKey(tpmUuid, TpmVO.class); + } + /** * @param bootMode boot mode, null is Legacy */ diff --git a/conf/errorCodes/keyProvider.xml b/conf/errorCodes/keyProvider.xml new file mode 100644 index 00000000000..12facc8ebf9 --- /dev/null +++ b/conf/errorCodes/keyProvider.xml @@ -0,0 +1,108 @@ + + KP + + + 1000 + ok + + + + 1001 + invalid content + + + + 1002 + internal error + + + + 1500 + backend unavailable + + + + 1506 + socket not found + + + + 1507 + socket not socket + + + + 1700 + kmip connect failed + + + + 1701 + kmip timeout + + + + 1702 + kmip tls handshake failed + + + + 1703 + kmip cert invalid + + + + 1704 + kmip operation failed + + + + 1600 + root key sha256 mismatch + + + + 1601 + root key sha256 file missing + + + + 1602 + zip data required + + + + 1603 + checksum mismatch + + + + 1604 + password invalid + + + + 1605 + root key extension missing + + + + 1900 + name duplicate + + + + 1901 + uuid duplicate + + + + 2000 + TPM related errors + + + + 2101 + TPM already attached key provider + + diff --git a/conf/springConfigXml/Kvm.xml b/conf/springConfigXml/Kvm.xml index 7d4e03f86b2..c8d357c9d37 100755 --- a/conf/springConfigXml/Kvm.xml +++ b/conf/springConfigXml/Kvm.xml @@ -273,6 +273,4 @@ - - diff --git 
a/conf/springConfigXml/VmInstanceManager.xml b/conf/springConfigXml/VmInstanceManager.xml index 55af623b940..3d84703cca4 100755 --- a/conf/springConfigXml/VmInstanceManager.xml +++ b/conf/springConfigXml/VmInstanceManager.xml @@ -283,16 +283,18 @@ - - - - - - - - - - - - - + + + + + + + + + + + + + + + diff --git a/header/src/main/java/org/zstack/header/vm/VmInstanceCreateExtensionPoint.java b/header/src/main/java/org/zstack/header/vm/VmInstanceCreateExtensionPoint.java index 029eaa43334..a9d21d6b8e2 100644 --- a/header/src/main/java/org/zstack/header/vm/VmInstanceCreateExtensionPoint.java +++ b/header/src/main/java/org/zstack/header/vm/VmInstanceCreateExtensionPoint.java @@ -9,4 +9,11 @@ public interface VmInstanceCreateExtensionPoint { void preCreateVmInstance(CreateVmInstanceMsg msg); default void afterPersistVmInstanceVO(VmInstanceVO vo, CreateVmInstanceMsg msg) {} + + /** + * Invoked when VM creation rolls back after + * {`@link` `#afterPersistVmInstanceVO`(VmInstanceVO, CreateVmInstanceMsg)} so extensions can + * clean up any state created in that hook. Implementations should be idempotent. 
+ */ + default void afterRollbackPersistVmInstanceVO(VmInstanceVO vo, CreateVmInstanceMsg msg) {} } diff --git a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java index 781faf4c186..f0b1dffb7a9 100644 --- a/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java +++ b/plugin/kvm/src/main/java/org/zstack/kvm/tpm/KvmTpmManager.java @@ -1,6 +1,7 @@ package org.zstack.kvm.tpm; import org.springframework.beans.factory.annotation.Autowired; +import org.zstack.compute.vm.devices.TpmEncryptedResourceKeyBackend; import org.zstack.compute.vm.devices.VmTpmManager; import org.zstack.core.asyncbatch.While; import org.zstack.core.cloudbus.CloudBus; diff --git a/test/src/test/resources/springConfigXml/Kvm.xml b/test/src/test/resources/springConfigXml/Kvm.xml index 730e9c79cae..8883bdb5ae9 100755 --- a/test/src/test/resources/springConfigXml/Kvm.xml +++ b/test/src/test/resources/springConfigXml/Kvm.xml @@ -272,6 +272,4 @@ - -