From a1dcbcbf57777ab3ab73df9946ba223303a7b5b9 Mon Sep 17 00:00:00 2001
From: Alessandro Affinito
Date: Thu, 16 Apr 2026 17:09:34 +0200
Subject: [PATCH 01/12] [ARO-24603] Add centralized vms package for VM size management

Introduces pkg/api/util/vms/ with canonical VMSize type, size constants,
supported size maps (production and testing/CI), and CI candidate selection
via shuffleByCoreTier to spread quota pressure across families.

Co-Authored-By: Claude Opus 4.6
---
 pkg/api/util/vms/sizes.go      | 393 ++++++++++++++++++++++++++++++
 pkg/api/util/vms/sizes_test.go |  24 ++
 pkg/api/util/vms/types.go      | 433 +++++++++++++++++++++++++++++++++
 3 files changed, 850 insertions(+)
 create mode 100644 pkg/api/util/vms/sizes.go
 create mode 100644 pkg/api/util/vms/sizes_test.go
 create mode 100644 pkg/api/util/vms/types.go

diff --git a/pkg/api/util/vms/sizes.go b/pkg/api/util/vms/sizes.go
new file mode 100644
index 00000000000..aeeb123d6e5
--- /dev/null
+++ b/pkg/api/util/vms/sizes.go
@@ -0,0 +1,393 @@
+package vms
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"maps"
+	"math/rand/v2"
+	"slices"
+)
+
+// Public facing document which lists supported VM Sizes:
+// https://learn.microsoft.com/en-us/azure/openshift/support-policies-v4#supported-virtual-machine-sizes
+
+// Adding new instance types needs Project Management's involvement; instructions:
+// https://github.com/Azure/ARO-RP/blob/master/docs/adding-new-instance-types.md
+
+// SupportedMasterVMSizes contains all VM sizes valid for control plane nodes.
+var SupportedMasterVMSizes = map[VMSize]VMSizeStruct{ + // General purpose + VMSizeStandardD8sV3: vmSizeStandardD8sV3Struct, + VMSizeStandardD16sV3: vmSizeStandardD16sV3Struct, + VMSizeStandardD32sV3: vmSizeStandardD32sV3Struct, + + VMSizeStandardD8sV4: vmSizeStandardD8sV4Struct, + VMSizeStandardD16sV4: vmSizeStandardD16sV4Struct, + VMSizeStandardD32sV4: vmSizeStandardD32sV4Struct, + + VMSizeStandardD8sV5: vmSizeStandardD8sV5Struct, + VMSizeStandardD16sV5: vmSizeStandardD16sV5Struct, + VMSizeStandardD32sV5: vmSizeStandardD32sV5Struct, + + VMSizeStandardD8asV4: vmSizeStandardD8asV4Struct, + VMSizeStandardD16asV4: vmSizeStandardD16asV4Struct, + VMSizeStandardD32asV4: vmSizeStandardD32asV4Struct, + + VMSizeStandardD8asV5: vmSizeStandardD8asV5Struct, + VMSizeStandardD16asV5: vmSizeStandardD16asV5Struct, + VMSizeStandardD32asV5: vmSizeStandardD32asV5Struct, + + VMSizeStandardD8dsV5: vmSizeStandardD8dsV5Struct, + VMSizeStandardD16dsV5: vmSizeStandardD16dsV5Struct, + VMSizeStandardD32dsV5: vmSizeStandardD32dsV5Struct, + + // Memory optimized + VMSizeStandardE8sV3: vmSizeStandardE8sV3Struct, + VMSizeStandardE16sV3: vmSizeStandardE16sV3Struct, + VMSizeStandardE32sV3: vmSizeStandardE32sV3Struct, + + VMSizeStandardE8sV4: vmSizeStandardE8sV4Struct, + VMSizeStandardE16sV4: vmSizeStandardE16sV4Struct, + VMSizeStandardE20sV4: vmSizeStandardE20sV4Struct, + VMSizeStandardE32sV4: vmSizeStandardE32sV4Struct, + VMSizeStandardE48sV4: vmSizeStandardE48sV4Struct, + VMSizeStandardE64sV4: vmSizeStandardE64sV4Struct, + + VMSizeStandardE8sV5: vmSizeStandardE8sV5Struct, + VMSizeStandardE16sV5: vmSizeStandardE16sV5Struct, + VMSizeStandardE20sV5: vmSizeStandardE20sV5Struct, + VMSizeStandardE32sV5: vmSizeStandardE32sV5Struct, + VMSizeStandardE48sV5: vmSizeStandardE48sV5Struct, + VMSizeStandardE64sV5: vmSizeStandardE64sV5Struct, + VMSizeStandardE96sV5: vmSizeStandardE96sV5Struct, + + VMSizeStandardE4asV4: vmSizeStandardE4asV4Struct, + VMSizeStandardE8asV4: vmSizeStandardE8asV4Struct, + 
VMSizeStandardE16asV4: vmSizeStandardE16asV4Struct, + VMSizeStandardE20asV4: vmSizeStandardE20asV4Struct, + VMSizeStandardE32asV4: vmSizeStandardE32asV4Struct, + VMSizeStandardE48asV4: vmSizeStandardE48asV4Struct, + VMSizeStandardE64asV4: vmSizeStandardE64asV4Struct, + VMSizeStandardE96asV4: vmSizeStandardE96asV4Struct, + + VMSizeStandardE8asV5: vmSizeStandardE8asV5Struct, + VMSizeStandardE16asV5: vmSizeStandardE16asV5Struct, + VMSizeStandardE20asV5: vmSizeStandardE20asV5Struct, + VMSizeStandardE32asV5: vmSizeStandardE32asV5Struct, + VMSizeStandardE48asV5: vmSizeStandardE48asV5Struct, + VMSizeStandardE64asV5: vmSizeStandardE64asV5Struct, + VMSizeStandardE96asV5: vmSizeStandardE96asV5Struct, + + VMSizeStandardE64isV3: vmSizeStandardE64isV3Struct, + VMSizeStandardE80isV4: vmSizeStandardE80isV4Struct, + VMSizeStandardE80idsV4: vmSizeStandardE80idsV4Struct, + VMSizeStandardE104isV5: vmSizeStandardE104isV5Struct, + VMSizeStandardE104idsV5: vmSizeStandardE104idsV5Struct, + + // Compute optimized + VMSizeStandardF72sV2: vmSizeStandardF72sV2Struct, + + // Memory and compute optimized + VMSizeStandardM128ms: vmSizeStandardM128msStruct, + + VMSizeStandardD4sV6: vmSizeStandardD4sV6Struct, + VMSizeStandardD8sV6: vmSizeStandardD8sV6Struct, + VMSizeStandardD16sV6: vmSizeStandardD16sV6Struct, + VMSizeStandardD32sV6: vmSizeStandardD32sV6Struct, + VMSizeStandardD48sV6: vmSizeStandardD48sV6Struct, + VMSizeStandardD64sV6: vmSizeStandardD64sV6Struct, + VMSizeStandardD96sV6: vmSizeStandardD96sV6Struct, + + VMSizeStandardD4dsV6: vmSizeStandardD4dsV6Struct, + VMSizeStandardD8dsV6: vmSizeStandardD8dsV6Struct, + VMSizeStandardD16dsV6: vmSizeStandardD16dsV6Struct, + VMSizeStandardD32dsV6: vmSizeStandardD32dsV6Struct, + VMSizeStandardD48dsV6: vmSizeStandardD48dsV6Struct, + VMSizeStandardD64dsV6: vmSizeStandardD64dsV6Struct, + VMSizeStandardD96dsV6: vmSizeStandardD96dsV6Struct, +} + +// SupportedWorkerVMSizes contains all VM sizes valid for worker nodes. 
+var SupportedWorkerVMSizes = map[VMSize]VMSizeStruct{ + // General purpose + VMSizeStandardD4sV3: vmSizeStandardD4sV3Struct, + VMSizeStandardD8sV3: vmSizeStandardD8sV3Struct, + VMSizeStandardD16sV3: vmSizeStandardD16sV3Struct, + VMSizeStandardD32sV3: vmSizeStandardD32sV3Struct, + + VMSizeStandardD4sV4: vmSizeStandardD4sV4Struct, + VMSizeStandardD8sV4: vmSizeStandardD8sV4Struct, + VMSizeStandardD16sV4: vmSizeStandardD16sV4Struct, + VMSizeStandardD32sV4: vmSizeStandardD32sV4Struct, + VMSizeStandardD64sV4: vmSizeStandardD64sV4Struct, + + VMSizeStandardD4sV5: vmSizeStandardD4sV5Struct, + VMSizeStandardD8sV5: vmSizeStandardD8sV5Struct, + VMSizeStandardD16sV5: vmSizeStandardD16sV5Struct, + VMSizeStandardD32sV5: vmSizeStandardD32sV5Struct, + VMSizeStandardD64sV5: vmSizeStandardD64sV5Struct, + VMSizeStandardD96sV5: vmSizeStandardD96sV5Struct, + + VMSizeStandardD4asV4: vmSizeStandardD4asV4Struct, + VMSizeStandardD8asV4: vmSizeStandardD8asV4Struct, + VMSizeStandardD16asV4: vmSizeStandardD16asV4Struct, + VMSizeStandardD32asV4: vmSizeStandardD32asV4Struct, + VMSizeStandardD64asV4: vmSizeStandardD64asV4Struct, + VMSizeStandardD96asV4: vmSizeStandardD96asV4Struct, + + VMSizeStandardD4asV5: vmSizeStandardD4asV5Struct, + VMSizeStandardD8asV5: vmSizeStandardD8asV5Struct, + VMSizeStandardD16asV5: vmSizeStandardD16asV5Struct, + VMSizeStandardD32asV5: vmSizeStandardD32asV5Struct, + VMSizeStandardD64asV5: vmSizeStandardD64asV5Struct, + VMSizeStandardD96asV5: vmSizeStandardD96asV5Struct, + + VMSizeStandardD4dsV5: vmSizeStandardD4dsV5Struct, + VMSizeStandardD8dsV5: vmSizeStandardD8dsV5Struct, + VMSizeStandardD16dsV5: vmSizeStandardD16dsV5Struct, + VMSizeStandardD32dsV5: vmSizeStandardD32dsV5Struct, + VMSizeStandardD64dsV5: vmSizeStandardD64dsV5Struct, + VMSizeStandardD96dsV5: vmSizeStandardD96dsV5Struct, + + // Memory optimized + VMSizeStandardE4sV3: vmSizeStandardE4sV3Struct, + VMSizeStandardE8sV3: vmSizeStandardE8sV3Struct, + VMSizeStandardE16sV3: vmSizeStandardE16sV3Struct, + 
VMSizeStandardE32sV3: vmSizeStandardE32sV3Struct, + + VMSizeStandardE2sV4: vmSizeStandardE2sV4Struct, + VMSizeStandardE4sV4: vmSizeStandardE4sV4Struct, + VMSizeStandardE8sV4: vmSizeStandardE8sV4Struct, + VMSizeStandardE16sV4: vmSizeStandardE16sV4Struct, + VMSizeStandardE20sV4: vmSizeStandardE20sV4Struct, + VMSizeStandardE32sV4: vmSizeStandardE32sV4Struct, + VMSizeStandardE48sV4: vmSizeStandardE48sV4Struct, + VMSizeStandardE64sV4: vmSizeStandardE64sV4Struct, + + VMSizeStandardE2sV5: vmSizeStandardE2sV5Struct, + VMSizeStandardE4sV5: vmSizeStandardE4sV5Struct, + VMSizeStandardE8sV5: vmSizeStandardE8sV5Struct, + VMSizeStandardE16sV5: vmSizeStandardE16sV5Struct, + VMSizeStandardE20sV5: vmSizeStandardE20sV5Struct, + VMSizeStandardE32sV5: vmSizeStandardE32sV5Struct, + VMSizeStandardE48sV5: vmSizeStandardE48sV5Struct, + VMSizeStandardE64sV5: vmSizeStandardE64sV5Struct, + VMSizeStandardE96sV5: vmSizeStandardE96sV5Struct, + + VMSizeStandardE4asV4: vmSizeStandardE4asV4Struct, + VMSizeStandardE8asV4: vmSizeStandardE8asV4Struct, + VMSizeStandardE16asV4: vmSizeStandardE16asV4Struct, + VMSizeStandardE20asV4: vmSizeStandardE20asV4Struct, + VMSizeStandardE32asV4: vmSizeStandardE32asV4Struct, + VMSizeStandardE48asV4: vmSizeStandardE48asV4Struct, + VMSizeStandardE64asV4: vmSizeStandardE64asV4Struct, + VMSizeStandardE96asV4: vmSizeStandardE96asV4Struct, + + VMSizeStandardE8asV5: vmSizeStandardE8asV5Struct, + VMSizeStandardE16asV5: vmSizeStandardE16asV5Struct, + VMSizeStandardE20asV5: vmSizeStandardE20asV5Struct, + VMSizeStandardE32asV5: vmSizeStandardE32asV5Struct, + VMSizeStandardE48asV5: vmSizeStandardE48asV5Struct, + VMSizeStandardE64asV5: vmSizeStandardE64asV5Struct, + VMSizeStandardE96asV5: vmSizeStandardE96asV5Struct, + + VMSizeStandardE64isV3: vmSizeStandardE64isV3Struct, + VMSizeStandardE80isV4: vmSizeStandardE80isV4Struct, + VMSizeStandardE80idsV4: vmSizeStandardE80idsV4Struct, + VMSizeStandardE104isV5: vmSizeStandardE104isV5Struct, + VMSizeStandardE104idsV5: 
vmSizeStandardE104idsV5Struct, + + // Compute optimized + VMSizeStandardF4sV2: vmSizeStandardF4sV2Struct, + VMSizeStandardF8sV2: vmSizeStandardF8sV2Struct, + VMSizeStandardF16sV2: vmSizeStandardF16sV2Struct, + VMSizeStandardF32sV2: vmSizeStandardF32sV2Struct, + VMSizeStandardF72sV2: vmSizeStandardF72sV2Struct, + + // Memory and compute optimized + VMSizeStandardM128ms: vmSizeStandardM128msStruct, + + // Storage optimized + VMSizeStandardL4s: vmSizeStandardL4sStruct, + VMSizeStandardL8s: vmSizeStandardL8sStruct, + VMSizeStandardL16s: vmSizeStandardL16sStruct, + VMSizeStandardL32s: vmSizeStandardL32sStruct, + + VMSizeStandardL8sV2: vmSizeStandardL8sV2Struct, + VMSizeStandardL16sV2: vmSizeStandardL16sV2Struct, + VMSizeStandardL32sV2: vmSizeStandardL32sV2Struct, + VMSizeStandardL48sV2: vmSizeStandardL48sV2Struct, + VMSizeStandardL64sV2: vmSizeStandardL64sV2Struct, + + VMSizeStandardL8sV3: vmSizeStandardL8sV3Struct, + VMSizeStandardL16sV3: vmSizeStandardL16sV3Struct, + VMSizeStandardL32sV3: vmSizeStandardL32sV3Struct, + VMSizeStandardL48sV3: vmSizeStandardL48sV3Struct, + VMSizeStandardL64sV3: vmSizeStandardL64sV3Struct, + + VMSizeStandardL4sV4: vmSizeStandardL4sV4Struct, + VMSizeStandardL8sV4: vmSizeStandardL8sV4Struct, + VMSizeStandardL16sV4: vmSizeStandardL16sV4Struct, + VMSizeStandardL32sV4: vmSizeStandardL32sV4Struct, + VMSizeStandardL48sV4: vmSizeStandardL48sV4Struct, + VMSizeStandardL64sV4: vmSizeStandardL64sV4Struct, + VMSizeStandardL80sV4: vmSizeStandardL80sV4Struct, + + // GPU nodes + // the formatting of the ncasv3_t4 family is different. 
This can be seen through a + // az vm list-usage -l eastus + VMSizeStandardNC4asT4V3: vmSizeStandardNC4asT4V3Struct, + VMSizeStandardNC8asT4V3: vmSizeStandardNC8asT4V3Struct, + VMSizeStandardNC16asT4V3: vmSizeStandardNC16asT4V3Struct, + VMSizeStandardNC64asT4V3: vmSizeStandardNC64asT4V3Struct, + + VMSizeStandardNC6sV3: vmSizeStandardNC6sV3Struct, + VMSizeStandardNC12sV3: vmSizeStandardNC12sV3Struct, + VMSizeStandardNC24sV3: vmSizeStandardNC24sV3Struct, + VMSizeStandardNC24rsV3: vmSizeStandardNC24rsV3Struct, + + VMSizeStandardD4sV6: vmSizeStandardD4sV6Struct, + VMSizeStandardD8sV6: vmSizeStandardD8sV6Struct, + VMSizeStandardD16sV6: vmSizeStandardD16sV6Struct, + VMSizeStandardD32sV6: vmSizeStandardD32sV6Struct, + VMSizeStandardD48sV6: vmSizeStandardD48sV6Struct, + VMSizeStandardD64sV6: vmSizeStandardD64sV6Struct, + VMSizeStandardD96sV6: vmSizeStandardD96sV6Struct, + + VMSizeStandardD4dsV6: vmSizeStandardD4dsV6Struct, + VMSizeStandardD8dsV6: vmSizeStandardD8dsV6Struct, + VMSizeStandardD16dsV6: vmSizeStandardD16dsV6Struct, + VMSizeStandardD32dsV6: vmSizeStandardD32dsV6Struct, + VMSizeStandardD48dsV6: vmSizeStandardD48dsV6Struct, + VMSizeStandardD64dsV6: vmSizeStandardD64dsV6Struct, + VMSizeStandardD96dsV6: vmSizeStandardD96dsV6Struct, + + VMSizeStandardD4lsV6: vmSizeStandardD4lsV6Struct, + VMSizeStandardD8lsV6: vmSizeStandardD8lsV6Struct, + VMSizeStandardD16lsV6: vmSizeStandardD16lsV6Struct, + VMSizeStandardD32lsV6: vmSizeStandardD32lsV6Struct, + VMSizeStandardD48lsV6: vmSizeStandardD48lsV6Struct, + VMSizeStandardD64lsV6: vmSizeStandardD64lsV6Struct, + VMSizeStandardD96lsV6: vmSizeStandardD96lsV6Struct, + + VMSizeStandardD4ldsV6: vmSizeStandardD4ldsV6Struct, + VMSizeStandardD8ldsV6: vmSizeStandardD8ldsV6Struct, + VMSizeStandardD16ldsV6: vmSizeStandardD16ldsV6Struct, + VMSizeStandardD32ldsV6: vmSizeStandardD32ldsV6Struct, + VMSizeStandardD48ldsV6: vmSizeStandardD48ldsV6Struct, + VMSizeStandardD64ldsV6: vmSizeStandardD64ldsV6Struct, + VMSizeStandardD96ldsV6: 
vmSizeStandardD96ldsV6Struct, +} + +// SupportedMasterVMSizesForTesting contains small master VM sizes used +// in CI/dev. Initialized with these entries then merged with SupportedMasterVMSizes in init(). +var SupportedMasterVMSizesForTesting = map[VMSize]VMSizeStruct{ + VMSizeStandardD4sV3: vmSizeStandardD4sV3Struct, + VMSizeStandardD4sV4: vmSizeStandardD4sV4Struct, + VMSizeStandardD4sV5: vmSizeStandardD4sV5Struct, + VMSizeStandardD4sV6: vmSizeStandardD4sV6Struct, +} + +// SupportedWorkerVMSizesForTesting contains small worker VM sizes used +// in CI/dev. Initialized with these entries then merged with SupportedWorkerVMSizes in init(). +var SupportedWorkerVMSizesForTesting = map[VMSize]VMSizeStruct{ + VMSizeStandardD2sV3: vmSizeStandardD2sV3Struct, + VMSizeStandardD2sV4: vmSizeStandardD2sV4Struct, + VMSizeStandardD2sV5: vmSizeStandardD2sV5Struct, + VMSizeStandardD2sV6: vmSizeStandardD2sV6Struct, +} + +// SupportedVMSizesByRole maps each VMRole to its production-supported VM sizes. +var SupportedVMSizesByRole = map[VMRole]map[VMSize]VMSizeStruct{ + VMRoleMaster: SupportedMasterVMSizes, + VMRoleWorker: SupportedWorkerVMSizes, +} + +// SupportedVMSizesByRoleForTesting maps each VMRole to its CI/dev VM sizes +// (production sizes plus smaller testing-only sizes). +var SupportedVMSizesByRoleForTesting = map[VMRole]map[VMSize]VMSizeStruct{ + VMRoleMaster: SupportedMasterVMSizesForTesting, + VMRoleWorker: SupportedWorkerVMSizesForTesting, +} + +func init() { + maps.Copy(SupportedMasterVMSizesForTesting, SupportedMasterVMSizes) + maps.Copy(SupportedWorkerVMSizesForTesting, SupportedWorkerVMSizes) +} + +// minMasterVMSizes contains the smallest supported master VM size for each +// general-purpose D-series family. Used by test/CI/dev tooling to select +// cost-effective sizes while spreading quota across families. 
+var minMasterVMSizes = map[VMSize]VMSizeStruct{
+	VMSizeStandardD8sV3:  vmSizeStandardD8sV3Struct,
+	VMSizeStandardD8sV4:  vmSizeStandardD8sV4Struct,
+	VMSizeStandardD8sV5:  vmSizeStandardD8sV5Struct,
+	VMSizeStandardD8asV4: vmSizeStandardD8asV4Struct,
+	VMSizeStandardD8asV5: vmSizeStandardD8asV5Struct,
+	VMSizeStandardD8dsV5: vmSizeStandardD8dsV5Struct,
+}
+
+// minWorkerVMSizes contains the smallest supported worker VM size for each
+// general-purpose D-series family.
+// NOTE(review): Standard_D2s_v5 and Standard_D4s_v5 both belong to the DSv5
+// family, so that family has two entries here (D2s_v5 is a testing/CI-only
+// size) — confirm whether the "one per family" intent still holds.
+var minWorkerVMSizes = map[VMSize]VMSizeStruct{
+	VMSizeStandardD2sV5:  vmSizeStandardD2sV5Struct,
+	VMSizeStandardD4sV3:  vmSizeStandardD4sV3Struct,
+	VMSizeStandardD4sV4:  vmSizeStandardD4sV4Struct,
+	VMSizeStandardD4sV5:  vmSizeStandardD4sV5Struct,
+	VMSizeStandardD4asV4: vmSizeStandardD4asV4Struct,
+	VMSizeStandardD4asV5: vmSizeStandardD4asV5Struct,
+	VMSizeStandardD4dsV5: vmSizeStandardD4dsV5Struct,
+}
+
+// LookupVMSize returns the VMSizeStruct for a given VMSize by searching all
+// supported size maps (production plus the testing/CI maps, which init()
+// merges the production entries into). Returns false if the size is unknown.
+func LookupVMSize(vmSize VMSize) (VMSizeStruct, bool) {
+	if s, ok := SupportedWorkerVMSizes[vmSize]; ok {
+		return s, true
+	}
+	if s, ok := SupportedMasterVMSizes[vmSize]; ok {
+		return s, true
+	}
+	if s, ok := SupportedWorkerVMSizesForTesting[vmSize]; ok {
+		return s, true
+	}
+	if s, ok := SupportedMasterVMSizesForTesting[vmSize]; ok {
+		return s, true
+	}
+	return VMSizeStruct{}, false
+}
+
+// GetCICandidateMasterVMSizes returns the minimum master VM sizes, shuffled
+// within each core-count tier to spread quota pressure in CI.
+func GetCICandidateMasterVMSizes() []VMSize {
+	return shuffleByCoreTier(minMasterVMSizes)
+}
+
+// GetCICandidateWorkerVMSizes returns the minimum worker VM sizes, shuffled
+// within each core-count tier to spread quota pressure in CI.
+func GetCICandidateWorkerVMSizes() []VMSize { + return shuffleByCoreTier(minWorkerVMSizes) +} + +// shuffleByCoreTier groups VM sizes by core count (cheapest first), +// shuffles within each tier to spread quota pressure, then concatenates +// the tiers in ascending order. +func shuffleByCoreTier(sizeMap map[VMSize]VMSizeStruct) []VMSize { + tiers := map[int][]VMSize{} + for sz, info := range sizeMap { + tiers[info.CoreCount] = append(tiers[info.CoreCount], sz) + } + + coreCounts := slices.Sorted(maps.Keys(tiers)) + + result := make([]VMSize, 0, len(sizeMap)) + for _, cc := range coreCounts { + tier := tiers[cc] + rand.Shuffle(len(tier), func(i, j int) { + tier[i], tier[j] = tier[j], tier[i] + }) + result = append(result, tier...) + } + return result +} diff --git a/pkg/api/util/vms/sizes_test.go b/pkg/api/util/vms/sizes_test.go new file mode 100644 index 00000000000..9eb083afcea --- /dev/null +++ b/pkg/api/util/vms/sizes_test.go @@ -0,0 +1,24 @@ +package vms + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. + +import ( + "testing" +) + +func TestMinMasterVMSizesAreSupported(t *testing.T) { + for size := range minMasterVMSizes { + if _, ok := SupportedMasterVMSizes[size]; !ok { + t.Errorf("minMasterVMSizes entry %s is not in SupportedMasterVMSizes", size) + } + } +} + +func TestMinWorkerVMSizesAreSupported(t *testing.T) { + for size := range minWorkerVMSizes { + if _, ok := SupportedWorkerVMSizesForTesting[size]; !ok { + t.Errorf("minWorkerVMSizes entry %s is not in supportedWorkerVMSizesForInternalUser", size) + } + } +} diff --git a/pkg/api/util/vms/types.go b/pkg/api/util/vms/types.go new file mode 100644 index 00000000000..d98d88b96f6 --- /dev/null +++ b/pkg/api/util/vms/types.go @@ -0,0 +1,433 @@ +package vms + +import ( + "encoding/json" + + "github.com/Azure/ARO-RP/pkg/api/util/version" +) + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. 
+ +// VMRole represents a VM role = [master, worker] +type VMRole string + +const ( + VMRoleMaster VMRole = "master" + VMRoleWorker VMRole = "worker" +) + +// VMSize represents a VM size +type VMSize string + +func (vmSize VMSize) String() string { + return string(vmSize) +} + +// VMSize constants +// add required resources in pkg/validate/dynamic/quota.go when adding a new VMSize +const ( + VMSizeStandardD2sV3 VMSize = "Standard_D2s_v3" + VMSizeStandardD4sV3 VMSize = "Standard_D4s_v3" + VMSizeStandardD8sV3 VMSize = "Standard_D8s_v3" + VMSizeStandardD16sV3 VMSize = "Standard_D16s_v3" + VMSizeStandardD32sV3 VMSize = "Standard_D32s_v3" + + VMSizeStandardD2sV4 VMSize = "Standard_D2s_v4" + VMSizeStandardD4sV4 VMSize = "Standard_D4s_v4" + VMSizeStandardD8sV4 VMSize = "Standard_D8s_v4" + VMSizeStandardD16sV4 VMSize = "Standard_D16s_v4" + VMSizeStandardD32sV4 VMSize = "Standard_D32s_v4" + VMSizeStandardD64sV4 VMSize = "Standard_D64s_v4" + + VMSizeStandardD2sV5 VMSize = "Standard_D2s_v5" + VMSizeStandardD4sV5 VMSize = "Standard_D4s_v5" + VMSizeStandardD8sV5 VMSize = "Standard_D8s_v5" + VMSizeStandardD16sV5 VMSize = "Standard_D16s_v5" + VMSizeStandardD32sV5 VMSize = "Standard_D32s_v5" + VMSizeStandardD64sV5 VMSize = "Standard_D64s_v5" + VMSizeStandardD96sV5 VMSize = "Standard_D96s_v5" + + VMSizeStandardD4asV4 VMSize = "Standard_D4as_v4" + VMSizeStandardD8asV4 VMSize = "Standard_D8as_v4" + VMSizeStandardD16asV4 VMSize = "Standard_D16as_v4" + VMSizeStandardD32asV4 VMSize = "Standard_D32as_v4" + VMSizeStandardD64asV4 VMSize = "Standard_D64as_v4" + VMSizeStandardD96asV4 VMSize = "Standard_D96as_v4" + + VMSizeStandardD4asV5 VMSize = "Standard_D4as_v5" + VMSizeStandardD8asV5 VMSize = "Standard_D8as_v5" + VMSizeStandardD16asV5 VMSize = "Standard_D16as_v5" + VMSizeStandardD32asV5 VMSize = "Standard_D32as_v5" + VMSizeStandardD64asV5 VMSize = "Standard_D64as_v5" + VMSizeStandardD96asV5 VMSize = "Standard_D96as_v5" + + VMSizeStandardD4dsV5 VMSize = "Standard_D4ds_v5" + 
VMSizeStandardD8dsV5 VMSize = "Standard_D8ds_v5" + VMSizeStandardD16dsV5 VMSize = "Standard_D16ds_v5" + VMSizeStandardD32dsV5 VMSize = "Standard_D32ds_v5" + VMSizeStandardD64dsV5 VMSize = "Standard_D64ds_v5" + VMSizeStandardD96dsV5 VMSize = "Standard_D96ds_v5" + + VMSizeStandardD2sV6 VMSize = "Standard_D2s_v6" + VMSizeStandardD4sV6 VMSize = "Standard_D4s_v6" + VMSizeStandardD8sV6 VMSize = "Standard_D8s_v6" + VMSizeStandardD16sV6 VMSize = "Standard_D16s_v6" + VMSizeStandardD32sV6 VMSize = "Standard_D32s_v6" + VMSizeStandardD48sV6 VMSize = "Standard_D48s_v6" + VMSizeStandardD64sV6 VMSize = "Standard_D64s_v6" + VMSizeStandardD96sV6 VMSize = "Standard_D96s_v6" + + VMSizeStandardD4dsV6 VMSize = "Standard_D4ds_v6" + VMSizeStandardD8dsV6 VMSize = "Standard_D8ds_v6" + VMSizeStandardD16dsV6 VMSize = "Standard_D16ds_v6" + VMSizeStandardD32dsV6 VMSize = "Standard_D32ds_v6" + VMSizeStandardD48dsV6 VMSize = "Standard_D48ds_v6" + VMSizeStandardD64dsV6 VMSize = "Standard_D64ds_v6" + VMSizeStandardD96dsV6 VMSize = "Standard_D96ds_v6" + + VMSizeStandardE4sV3 VMSize = "Standard_E4s_v3" + VMSizeStandardE8sV3 VMSize = "Standard_E8s_v3" + VMSizeStandardE16sV3 VMSize = "Standard_E16s_v3" + VMSizeStandardE32sV3 VMSize = "Standard_E32s_v3" + + VMSizeStandardE2sV4 VMSize = "Standard_E2s_v4" + VMSizeStandardE4sV4 VMSize = "Standard_E4s_v4" + VMSizeStandardE8sV4 VMSize = "Standard_E8s_v4" + VMSizeStandardE16sV4 VMSize = "Standard_E16s_v4" + VMSizeStandardE20sV4 VMSize = "Standard_E20s_v4" + VMSizeStandardE32sV4 VMSize = "Standard_E32s_v4" + VMSizeStandardE48sV4 VMSize = "Standard_E48s_v4" + VMSizeStandardE64sV4 VMSize = "Standard_E64s_v4" + + VMSizeStandardE2sV5 VMSize = "Standard_E2s_v5" + VMSizeStandardE4sV5 VMSize = "Standard_E4s_v5" + VMSizeStandardE8sV5 VMSize = "Standard_E8s_v5" + VMSizeStandardE16sV5 VMSize = "Standard_E16s_v5" + VMSizeStandardE20sV5 VMSize = "Standard_E20s_v5" + VMSizeStandardE32sV5 VMSize = "Standard_E32s_v5" + VMSizeStandardE48sV5 VMSize = "Standard_E48s_v5" + 
VMSizeStandardE64sV5 VMSize = "Standard_E64s_v5" + VMSizeStandardE96sV5 VMSize = "Standard_E96s_v5" + + VMSizeStandardE4asV4 VMSize = "Standard_E4as_v4" + VMSizeStandardE8asV4 VMSize = "Standard_E8as_v4" + VMSizeStandardE16asV4 VMSize = "Standard_E16as_v4" + VMSizeStandardE20asV4 VMSize = "Standard_E20as_v4" + VMSizeStandardE32asV4 VMSize = "Standard_E32as_v4" + VMSizeStandardE48asV4 VMSize = "Standard_E48as_v4" + VMSizeStandardE64asV4 VMSize = "Standard_E64as_v4" + VMSizeStandardE96asV4 VMSize = "Standard_E96as_v4" + + VMSizeStandardE8asV5 VMSize = "Standard_E8as_v5" + VMSizeStandardE16asV5 VMSize = "Standard_E16as_v5" + VMSizeStandardE20asV5 VMSize = "Standard_E20as_v5" + VMSizeStandardE32asV5 VMSize = "Standard_E32as_v5" + VMSizeStandardE48asV5 VMSize = "Standard_E48as_v5" + VMSizeStandardE64asV5 VMSize = "Standard_E64as_v5" + VMSizeStandardE96asV5 VMSize = "Standard_E96as_v5" + + VMSizeStandardE64isV3 VMSize = "Standard_E64is_v3" + VMSizeStandardE80isV4 VMSize = "Standard_E80is_v4" + VMSizeStandardE80idsV4 VMSize = "Standard_E80ids_v4" + VMSizeStandardE96dsV5 VMSize = "Standard_E96ds_v5" + VMSizeStandardE104isV5 VMSize = "Standard_E104is_v5" + VMSizeStandardE104idsV5 VMSize = "Standard_E104ids_v5" + + VMSizeStandardF4sV2 VMSize = "Standard_F4s_v2" + VMSizeStandardF8sV2 VMSize = "Standard_F8s_v2" + VMSizeStandardF16sV2 VMSize = "Standard_F16s_v2" + VMSizeStandardF32sV2 VMSize = "Standard_F32s_v2" + VMSizeStandardF72sV2 VMSize = "Standard_F72s_v2" + + VMSizeStandardM128ms VMSize = "Standard_M128ms" + + VMSizeStandardL4s VMSize = "Standard_L4s" + VMSizeStandardL8s VMSize = "Standard_L8s" + VMSizeStandardL16s VMSize = "Standard_L16s" + VMSizeStandardL32s VMSize = "Standard_L32s" + + VMSizeStandardL8sV2 VMSize = "Standard_L8s_v2" + VMSizeStandardL16sV2 VMSize = "Standard_L16s_v2" + VMSizeStandardL32sV2 VMSize = "Standard_L32s_v2" + VMSizeStandardL48sV2 VMSize = "Standard_L48s_v2" + VMSizeStandardL64sV2 VMSize = "Standard_L64s_v2" + + VMSizeStandardL8sV3 VMSize = 
"Standard_L8s_v3" + VMSizeStandardL16sV3 VMSize = "Standard_L16s_v3" + VMSizeStandardL32sV3 VMSize = "Standard_L32s_v3" + VMSizeStandardL48sV3 VMSize = "Standard_L48s_v3" + VMSizeStandardL64sV3 VMSize = "Standard_L64s_v3" + + VMSizeStandardL4sV4 VMSize = "Standard_L4s_v4" + VMSizeStandardL8sV4 VMSize = "Standard_L8s_v4" + VMSizeStandardL16sV4 VMSize = "Standard_L16s_v4" + VMSizeStandardL32sV4 VMSize = "Standard_L32s_v4" + VMSizeStandardL48sV4 VMSize = "Standard_L48s_v4" + VMSizeStandardL64sV4 VMSize = "Standard_L64s_v4" + VMSizeStandardL80sV4 VMSize = "Standard_L80s_v4" + + VMSizeStandardD4lsV6 VMSize = "Standard_D4ls_v6" + VMSizeStandardD8lsV6 VMSize = "Standard_D8ls_v6" + VMSizeStandardD16lsV6 VMSize = "Standard_D16ls_v6" + VMSizeStandardD32lsV6 VMSize = "Standard_D32ls_v6" + VMSizeStandardD48lsV6 VMSize = "Standard_D48ls_v6" + VMSizeStandardD64lsV6 VMSize = "Standard_D64ls_v6" + VMSizeStandardD96lsV6 VMSize = "Standard_D96ls_v6" + + VMSizeStandardD4ldsV6 VMSize = "Standard_D4lds_v6" + VMSizeStandardD8ldsV6 VMSize = "Standard_D8lds_v6" + VMSizeStandardD16ldsV6 VMSize = "Standard_D16lds_v6" + VMSizeStandardD32ldsV6 VMSize = "Standard_D32lds_v6" + VMSizeStandardD48ldsV6 VMSize = "Standard_D48lds_v6" + VMSizeStandardD64ldsV6 VMSize = "Standard_D64lds_v6" + VMSizeStandardD96ldsV6 VMSize = "Standard_D96lds_v6" + + // GPU VMs + VMSizeStandardNC4asT4V3 VMSize = "Standard_NC4as_T4_v3" + VMSizeStandardNC8asT4V3 VMSize = "Standard_NC8as_T4_v3" + VMSizeStandardNC16asT4V3 VMSize = "Standard_NC16as_T4_v3" + VMSizeStandardNC64asT4V3 VMSize = "Standard_NC64as_T4_v3" + + VMSizeStandardNC6sV3 VMSize = "Standard_NC6s_v3" + VMSizeStandardNC12sV3 VMSize = "Standard_NC12s_v3" + VMSizeStandardNC24sV3 VMSize = "Standard_NC24s_v3" + VMSizeStandardNC24rsV3 VMSize = "Standard_NC24rs_v3" +) + +var ver419 = version.NewVersion(4, 19, 0) + +// VMSizeStruct holds metadata for a VM size: core count, family, and +// optional minimum OpenShift version requirement. 
+type VMSizeStruct struct { + CoreCount int `json:"coreCount,omitempty"` + Family VMFamily `json:"family,omitempty"` + MinimumVersion version.Version `json:"minimumVersion,omitempty"` +} + +func (v *VMSizeStruct) UnmarshalJSON(data []byte) error { + type alias VMSizeStruct + aux := &struct { + MinimumVersion string `json:"minimumVersion,omitempty"` + *alias + }{ + alias: (*alias)(v), + } + if err := json.Unmarshal(data, aux); err != nil { + return err + } + if aux.MinimumVersion != "" { + parsed, err := version.ParseVersion(aux.MinimumVersion) + if err != nil { + return err + } + v.MinimumVersion = parsed + } + return nil +} + +var ( + vmSizeStandardD2sV3Struct = VMSizeStruct{CoreCount: 2, Family: standardDSv3} + vmSizeStandardD4sV3Struct = VMSizeStruct{CoreCount: 4, Family: standardDSv3} + vmSizeStandardD8sV3Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv3} + vmSizeStandardD16sV3Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv3} + vmSizeStandardD32sV3Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv3} + + vmSizeStandardD2sV4Struct = VMSizeStruct{CoreCount: 2, Family: standardDSv4} + vmSizeStandardD4sV4Struct = VMSizeStruct{CoreCount: 4, Family: standardDSv4} + vmSizeStandardD8sV4Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv4} + vmSizeStandardD16sV4Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv4} + vmSizeStandardD32sV4Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv4} + vmSizeStandardD64sV4Struct = VMSizeStruct{CoreCount: 64, Family: standardDSv4} + + vmSizeStandardD2sV5Struct = VMSizeStruct{CoreCount: 2, Family: standardDSv5} + vmSizeStandardD4sV5Struct = VMSizeStruct{CoreCount: 4, Family: standardDSv5} + vmSizeStandardD8sV5Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv5} + vmSizeStandardD16sV5Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv5} + vmSizeStandardD32sV5Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv5} + vmSizeStandardD64sV5Struct = VMSizeStruct{CoreCount: 64, Family: 
standardDSv5} + vmSizeStandardD96sV5Struct = VMSizeStruct{CoreCount: 96, Family: standardDSv5} + + vmSizeStandardD4asV4Struct = VMSizeStruct{CoreCount: 4, Family: standardDASv4} + vmSizeStandardD8asV4Struct = VMSizeStruct{CoreCount: 8, Family: standardDASv4} + vmSizeStandardD16asV4Struct = VMSizeStruct{CoreCount: 16, Family: standardDASv4} + vmSizeStandardD32asV4Struct = VMSizeStruct{CoreCount: 32, Family: standardDASv4} + vmSizeStandardD64asV4Struct = VMSizeStruct{CoreCount: 64, Family: standardDASv4} + vmSizeStandardD96asV4Struct = VMSizeStruct{CoreCount: 96, Family: standardDASv4} + + vmSizeStandardD4asV5Struct = VMSizeStruct{CoreCount: 4, Family: standardDASv5} + vmSizeStandardD8asV5Struct = VMSizeStruct{CoreCount: 8, Family: standardDASv5} + vmSizeStandardD16asV5Struct = VMSizeStruct{CoreCount: 16, Family: standardDASv5} + vmSizeStandardD32asV5Struct = VMSizeStruct{CoreCount: 32, Family: standardDASv5} + vmSizeStandardD64asV5Struct = VMSizeStruct{CoreCount: 64, Family: standardDASv5} + vmSizeStandardD96asV5Struct = VMSizeStruct{CoreCount: 96, Family: standardDASv5} + + vmSizeStandardD4dsV5Struct = VMSizeStruct{CoreCount: 4, Family: standardDDSv5} + vmSizeStandardD8dsV5Struct = VMSizeStruct{CoreCount: 8, Family: standardDDSv5} + vmSizeStandardD16dsV5Struct = VMSizeStruct{CoreCount: 16, Family: standardDDSv5} + vmSizeStandardD32dsV5Struct = VMSizeStruct{CoreCount: 32, Family: standardDDSv5} + vmSizeStandardD64dsV5Struct = VMSizeStruct{CoreCount: 64, Family: standardDDSv5} + vmSizeStandardD96dsV5Struct = VMSizeStruct{CoreCount: 96, Family: standardDDSv5} + + vmSizeStandardD2sV6Struct = VMSizeStruct{CoreCount: 2, Family: standardDSv6, MinimumVersion: ver419} + vmSizeStandardD4sV6Struct = VMSizeStruct{CoreCount: 4, Family: standardDSv6, MinimumVersion: ver419} + vmSizeStandardD8sV6Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv6, MinimumVersion: ver419} + vmSizeStandardD16sV6Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv6, MinimumVersion: ver419} 
+ vmSizeStandardD32sV6Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv6, MinimumVersion: ver419} + vmSizeStandardD48sV6Struct = VMSizeStruct{CoreCount: 48, Family: standardDSv6, MinimumVersion: ver419} + vmSizeStandardD64sV6Struct = VMSizeStruct{CoreCount: 64, Family: standardDSv6, MinimumVersion: ver419} + vmSizeStandardD96sV6Struct = VMSizeStruct{CoreCount: 96, Family: standardDSv6, MinimumVersion: ver419} + + vmSizeStandardD4dsV6Struct = VMSizeStruct{CoreCount: 4, Family: standardDDSv6, MinimumVersion: ver419} + vmSizeStandardD8dsV6Struct = VMSizeStruct{CoreCount: 8, Family: standardDDSv6, MinimumVersion: ver419} + vmSizeStandardD16dsV6Struct = VMSizeStruct{CoreCount: 16, Family: standardDDSv6, MinimumVersion: ver419} + vmSizeStandardD32dsV6Struct = VMSizeStruct{CoreCount: 32, Family: standardDDSv6, MinimumVersion: ver419} + vmSizeStandardD48dsV6Struct = VMSizeStruct{CoreCount: 48, Family: standardDDSv6, MinimumVersion: ver419} + vmSizeStandardD64dsV6Struct = VMSizeStruct{CoreCount: 64, Family: standardDDSv6, MinimumVersion: ver419} + vmSizeStandardD96dsV6Struct = VMSizeStruct{CoreCount: 96, Family: standardDDSv6, MinimumVersion: ver419} + + vmSizeStandardE4sV3Struct = VMSizeStruct{CoreCount: 4, Family: standardESv3} + vmSizeStandardE8sV3Struct = VMSizeStruct{CoreCount: 8, Family: standardESv3} + vmSizeStandardE16sV3Struct = VMSizeStruct{CoreCount: 16, Family: standardESv3} + vmSizeStandardE32sV3Struct = VMSizeStruct{CoreCount: 32, Family: standardESv3} + + vmSizeStandardE2sV4Struct = VMSizeStruct{CoreCount: 2, Family: standardESv4} + vmSizeStandardE4sV4Struct = VMSizeStruct{CoreCount: 4, Family: standardESv4} + vmSizeStandardE8sV4Struct = VMSizeStruct{CoreCount: 8, Family: standardESv4} + vmSizeStandardE16sV4Struct = VMSizeStruct{CoreCount: 16, Family: standardESv4} + vmSizeStandardE20sV4Struct = VMSizeStruct{CoreCount: 20, Family: standardESv4} + vmSizeStandardE32sV4Struct = VMSizeStruct{CoreCount: 32, Family: standardESv4} + vmSizeStandardE48sV4Struct 
= VMSizeStruct{CoreCount: 48, Family: standardESv4} + vmSizeStandardE64sV4Struct = VMSizeStruct{CoreCount: 64, Family: standardESv4} + + vmSizeStandardE2sV5Struct = VMSizeStruct{CoreCount: 2, Family: standardESv5} + vmSizeStandardE4sV5Struct = VMSizeStruct{CoreCount: 4, Family: standardESv5} + vmSizeStandardE8sV5Struct = VMSizeStruct{CoreCount: 8, Family: standardESv5} + vmSizeStandardE16sV5Struct = VMSizeStruct{CoreCount: 16, Family: standardESv5} + vmSizeStandardE20sV5Struct = VMSizeStruct{CoreCount: 20, Family: standardESv5} + vmSizeStandardE32sV5Struct = VMSizeStruct{CoreCount: 32, Family: standardESv5} + vmSizeStandardE48sV5Struct = VMSizeStruct{CoreCount: 48, Family: standardESv5} + vmSizeStandardE64sV5Struct = VMSizeStruct{CoreCount: 64, Family: standardESv5} + vmSizeStandardE96sV5Struct = VMSizeStruct{CoreCount: 96, Family: standardESv5} + + vmSizeStandardE4asV4Struct = VMSizeStruct{CoreCount: 4, Family: standardEASv4} + vmSizeStandardE8asV4Struct = VMSizeStruct{CoreCount: 8, Family: standardEASv4} + vmSizeStandardE16asV4Struct = VMSizeStruct{CoreCount: 16, Family: standardEASv4} + vmSizeStandardE20asV4Struct = VMSizeStruct{CoreCount: 20, Family: standardEASv4} + vmSizeStandardE32asV4Struct = VMSizeStruct{CoreCount: 32, Family: standardEASv4} + vmSizeStandardE48asV4Struct = VMSizeStruct{CoreCount: 48, Family: standardEASv4} + vmSizeStandardE64asV4Struct = VMSizeStruct{CoreCount: 64, Family: standardEASv4} + vmSizeStandardE96asV4Struct = VMSizeStruct{CoreCount: 96, Family: standardEASv4} + + vmSizeStandardE8asV5Struct = VMSizeStruct{CoreCount: 8, Family: standardEASv5} + vmSizeStandardE16asV5Struct = VMSizeStruct{CoreCount: 16, Family: standardEASv5} + vmSizeStandardE20asV5Struct = VMSizeStruct{CoreCount: 20, Family: standardEASv5} + vmSizeStandardE32asV5Struct = VMSizeStruct{CoreCount: 32, Family: standardEASv5} + vmSizeStandardE48asV5Struct = VMSizeStruct{CoreCount: 48, Family: standardEASv5} + vmSizeStandardE64asV5Struct = VMSizeStruct{CoreCount: 64, 
Family: standardEASv5} + vmSizeStandardE96asV5Struct = VMSizeStruct{CoreCount: 96, Family: standardEASv5} + + vmSizeStandardE64isV3Struct = VMSizeStruct{CoreCount: 64, Family: standardESv3} + vmSizeStandardE80isV4Struct = VMSizeStruct{CoreCount: 80, Family: standardEISv4} + vmSizeStandardE80idsV4Struct = VMSizeStruct{CoreCount: 80, Family: standardEIDSv4} + vmSizeStandardE104isV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEISv5} + vmSizeStandardE104idsV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEIDSv5} + + vmSizeStandardF4sV2Struct = VMSizeStruct{CoreCount: 4, Family: standardFSv2} + vmSizeStandardF8sV2Struct = VMSizeStruct{CoreCount: 8, Family: standardFSv2} + vmSizeStandardF16sV2Struct = VMSizeStruct{CoreCount: 16, Family: standardFSv2} + vmSizeStandardF32sV2Struct = VMSizeStruct{CoreCount: 32, Family: standardFSv2} + vmSizeStandardF72sV2Struct = VMSizeStruct{CoreCount: 72, Family: standardFSv2} + + vmSizeStandardM128msStruct = VMSizeStruct{CoreCount: 128, Family: standardMS} + + vmSizeStandardL4sStruct = VMSizeStruct{CoreCount: 4, Family: standardLSv2} + vmSizeStandardL8sStruct = VMSizeStruct{CoreCount: 8, Family: standardLSv2} + vmSizeStandardL16sStruct = VMSizeStruct{CoreCount: 16, Family: standardLSv2} + vmSizeStandardL32sStruct = VMSizeStruct{CoreCount: 32, Family: standardLSv2} + + vmSizeStandardL8sV2Struct = VMSizeStruct{CoreCount: 8, Family: standardLSv2} + vmSizeStandardL16sV2Struct = VMSizeStruct{CoreCount: 16, Family: standardLSv2} + vmSizeStandardL32sV2Struct = VMSizeStruct{CoreCount: 32, Family: standardLSv2} + vmSizeStandardL48sV2Struct = VMSizeStruct{CoreCount: 48, Family: standardLSv2} + vmSizeStandardL64sV2Struct = VMSizeStruct{CoreCount: 64, Family: standardLSv2} + + vmSizeStandardL8sV3Struct = VMSizeStruct{CoreCount: 8, Family: standardLSv3} + vmSizeStandardL16sV3Struct = VMSizeStruct{CoreCount: 16, Family: standardLSv3} + vmSizeStandardL32sV3Struct = VMSizeStruct{CoreCount: 32, Family: standardLSv3} + 
vmSizeStandardL48sV3Struct = VMSizeStruct{CoreCount: 48, Family: standardLSv3} + vmSizeStandardL64sV3Struct = VMSizeStruct{CoreCount: 64, Family: standardLSv3} + + vmSizeStandardL4sV4Struct = VMSizeStruct{CoreCount: 4, Family: standardLSv4, MinimumVersion: ver419} + vmSizeStandardL8sV4Struct = VMSizeStruct{CoreCount: 8, Family: standardLSv4, MinimumVersion: ver419} + vmSizeStandardL16sV4Struct = VMSizeStruct{CoreCount: 16, Family: standardLSv4, MinimumVersion: ver419} + vmSizeStandardL32sV4Struct = VMSizeStruct{CoreCount: 32, Family: standardLSv4, MinimumVersion: ver419} + vmSizeStandardL48sV4Struct = VMSizeStruct{CoreCount: 48, Family: standardLSv4, MinimumVersion: ver419} + vmSizeStandardL64sV4Struct = VMSizeStruct{CoreCount: 64, Family: standardLSv4, MinimumVersion: ver419} + vmSizeStandardL80sV4Struct = VMSizeStruct{CoreCount: 80, Family: standardLSv4, MinimumVersion: ver419} + + vmSizeStandardD4lsV6Struct = VMSizeStruct{CoreCount: 4, Family: standardDLSv6, MinimumVersion: ver419} + vmSizeStandardD8lsV6Struct = VMSizeStruct{CoreCount: 8, Family: standardDLSv6, MinimumVersion: ver419} + vmSizeStandardD16lsV6Struct = VMSizeStruct{CoreCount: 16, Family: standardDLSv6, MinimumVersion: ver419} + vmSizeStandardD32lsV6Struct = VMSizeStruct{CoreCount: 32, Family: standardDLSv6, MinimumVersion: ver419} + vmSizeStandardD48lsV6Struct = VMSizeStruct{CoreCount: 48, Family: standardDLSv6, MinimumVersion: ver419} + vmSizeStandardD64lsV6Struct = VMSizeStruct{CoreCount: 64, Family: standardDLSv6, MinimumVersion: ver419} + vmSizeStandardD96lsV6Struct = VMSizeStruct{CoreCount: 96, Family: standardDLSv6, MinimumVersion: ver419} + + vmSizeStandardD4ldsV6Struct = VMSizeStruct{CoreCount: 4, Family: standardDLDSv6, MinimumVersion: ver419} + vmSizeStandardD8ldsV6Struct = VMSizeStruct{CoreCount: 8, Family: standardDLDSv6, MinimumVersion: ver419} + vmSizeStandardD16ldsV6Struct = VMSizeStruct{CoreCount: 16, Family: standardDLDSv6, MinimumVersion: ver419} + vmSizeStandardD32ldsV6Struct = 
VMSizeStruct{CoreCount: 32, Family: standardDLDSv6, MinimumVersion: ver419} + vmSizeStandardD48ldsV6Struct = VMSizeStruct{CoreCount: 48, Family: standardDLDSv6, MinimumVersion: ver419} + vmSizeStandardD64ldsV6Struct = VMSizeStruct{CoreCount: 64, Family: standardDLDSv6, MinimumVersion: ver419} + vmSizeStandardD96ldsV6Struct = VMSizeStruct{CoreCount: 96, Family: standardDLDSv6, MinimumVersion: ver419} + + // GPU nodes + // The formatting of the ncasv3_t4 family is different. This can be seen through a + // az vm list-usage -l eastus + vmSizeStandardNC4asT4V3Struct = VMSizeStruct{CoreCount: 4, Family: standardNCAS} + vmSizeStandardNC8asT4V3Struct = VMSizeStruct{CoreCount: 8, Family: standardNCAS} + vmSizeStandardNC16asT4V3Struct = VMSizeStruct{CoreCount: 16, Family: standardNCAS} + vmSizeStandardNC64asT4V3Struct = VMSizeStruct{CoreCount: 64, Family: standardNCAS} + + vmSizeStandardNC6sV3Struct = VMSizeStruct{CoreCount: 6, Family: standardNCSv3} + vmSizeStandardNC12sV3Struct = VMSizeStruct{CoreCount: 12, Family: standardNCSv3} + vmSizeStandardNC24sV3Struct = VMSizeStruct{CoreCount: 24, Family: standardNCSv3} + vmSizeStandardNC24rsV3Struct = VMSizeStruct{CoreCount: 24, Family: standardNCSv3} +) + +// VMFamily represents a VM family +type VMFamily string + +func (vmFamily VMFamily) String() string { + return string(vmFamily) +} + +const ( + standardDSv3 VMFamily = "standardDSv3Family" + standardDSv4 VMFamily = "standardDSv4Family" + standardDSv5 VMFamily = "standardDSv5Family" + standardDSv6 VMFamily = "standardDSv6Family" + standardDASv4 VMFamily = "standardDASv4Family" + standardDASv5 VMFamily = "standardDASv5Family" + standardDDSv5 VMFamily = "standardDDSv5Family" + standardDDSv6 VMFamily = "standardDDSv6Family" + standardESv3 VMFamily = "standardESv3Family" + standardESv4 VMFamily = "standardESv4Family" + standardESv5 VMFamily = "standardESv5Family" + standardEASv4 VMFamily = "standardEASv4Family" + standardEASv5 VMFamily = "standardEASv5Family" + standardEISv4 
VMFamily = "standardEISv4Family" + standardEIDSv4 VMFamily = "standardEIDSv4Family" + standardEISv5 VMFamily = "standardEISv5Family" + standardEIDSv5 VMFamily = "standardEIDSv5Family" + standardFSv2 VMFamily = "standardFSv2Family" + standardMS VMFamily = "standardMSFamily" + standardLSv2 VMFamily = "standardLsv2Family" + standardLSv3 VMFamily = "standardLsv3Family" + standardLSv4 VMFamily = "standardLsv4Family" + standardDLSv6 VMFamily = "standardDLSv6Family" + standardDLDSv6 VMFamily = "standardDLDSv6Family" + standardNCAS VMFamily = "Standard NCASv3_T4 Family" + standardNCSv3 VMFamily = "Standard NCSv3 Family" +) From b0a664b17e429fb2baee08c50d7acfe2fbd0a40f Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Thu, 16 Apr 2026 17:14:33 +0200 Subject: [PATCH 02/12] [ARO-24603] Refactor API module to use centralized vms package Move VM size types and constants from api/openshiftcluster.go and admin/openshiftcluster.go into the new vms package. Simplify validate/vm.go by delegating to vms maps. Update Static() validator interface to replace requireD2sWorkers bool with isCI bool, and update all 11 API version implementations, convert files, and tests. 
Co-Authored-By: Claude Opus 4.6 --- pkg/api/admin/openshiftcluster.go | 176 +--- pkg/api/admin/openshiftcluster_convert.go | 12 +- .../admin/openshiftcluster_validatestatic.go | 2 +- .../openshiftcluster_validatestatic_test.go | 11 +- pkg/api/openshiftcluster.go | 392 +-------- pkg/api/openshiftclusterdocument_example.go | 16 +- pkg/api/register.go | 2 +- pkg/api/util/vms/sizes.go | 2 + pkg/api/util/vms/types.go | 4 + .../openshiftcluster_convert.go | 5 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 34 +- pkg/api/v20200430/openshiftcluster_convert.go | 5 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 34 +- .../openshiftcluster_convert.go | 5 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 34 +- pkg/api/v20220401/openshiftcluster_convert.go | 5 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 34 +- pkg/api/v20220904/openshiftcluster_convert.go | 5 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 38 +- pkg/api/v20230401/openshiftcluster_convert.go | 5 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 38 +- .../openshiftcluster_convert.go | 5 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 26 +- pkg/api/v20230904/openshiftcluster_convert.go | 7 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 38 +- pkg/api/v20231122/openshiftcluster_convert.go | 7 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 26 +- .../openshiftcluster_convert.go | 7 +- .../openshiftcluster_validatestatic.go | 27 +- .../openshiftcluster_validatestatic_test.go | 26 +- pkg/api/v20250725/openshiftcluster_convert.go | 7 +- .../openshiftcluster_validatestatic.go | 27 +- 
.../openshiftcluster_validatestatic_test.go | 67 +- pkg/api/validate/vm.go | 446 +--------- pkg/api/validate/vm_test.go | 765 +++++++++--------- 44 files changed, 745 insertions(+), 1838 deletions(-) diff --git a/pkg/api/admin/openshiftcluster.go b/pkg/api/admin/openshiftcluster.go index 4494d4ec6d7..5cbf6625565 100644 --- a/pkg/api/admin/openshiftcluster.go +++ b/pkg/api/admin/openshiftcluster.go @@ -5,6 +5,8 @@ package admin import ( "time" + + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) // OpenShiftClusterList represents a list of OpenShift clusters. @@ -267,185 +269,15 @@ const ( // MasterProfile represents a master profile. type MasterProfile struct { - VMSize VMSize `json:"vmSize,omitempty"` + VMSize vms.VMSize `json:"vmSize,omitempty"` SubnetID string `json:"subnetId,omitempty"` EncryptionAtHost EncryptionAtHost `json:"encryptionAtHost,omitempty"` DiskEncryptionSetID string `json:"diskEncryptionSetId,omitempty"` } - -// VMSize represents a VM size. -type VMSize string - -// VMSize constants. 
-const ( - VMSizeStandardD2sV3 VMSize = "Standard_D2s_v3" - VMSizeStandardD4sV3 VMSize = "Standard_D4s_v3" - VMSizeStandardD8sV3 VMSize = "Standard_D8s_v3" - VMSizeStandardD16sV3 VMSize = "Standard_D16s_v3" - VMSizeStandardD32sV3 VMSize = "Standard_D32s_v3" - - VMSizeStandardD4sV4 VMSize = "Standard_D4s_v4" - VMSizeStandardD8sV4 VMSize = "Standard_D8s_v4" - VMSizeStandardD16sV4 VMSize = "Standard_D16s_v4" - VMSizeStandardD32sV4 VMSize = "Standard_D32s_v4" - VMSizeStandardD64sV4 VMSize = "Standard_D64s_v4" - - VMSizeStandardD4sV5 VMSize = "Standard_D4s_v5" - VMSizeStandardD8sV5 VMSize = "Standard_D8s_v5" - VMSizeStandardD16sV5 VMSize = "Standard_D16s_v5" - VMSizeStandardD32sV5 VMSize = "Standard_D32s_v5" - VMSizeStandardD64sV5 VMSize = "Standard_D64s_v5" - VMSizeStandardD96sV5 VMSize = "Standard_D96s_v5" - - VMSizeStandardD4asV4 VMSize = "Standard_D4as_v4" - VMSizeStandardD8asV4 VMSize = "Standard_D8as_v4" - VMSizeStandardD16asV4 VMSize = "Standard_D16as_v4" - VMSizeStandardD32asV4 VMSize = "Standard_D32as_v4" - VMSizeStandardD64asV4 VMSize = "Standard_D64as_v4" - VMSizeStandardD96asV4 VMSize = "Standard_D96as_v4" - - VMSizeStandardD4asV5 VMSize = "Standard_D4as_v5" - VMSizeStandardD8asV5 VMSize = "Standard_D8as_v5" - VMSizeStandardD16asV5 VMSize = "Standard_D16as_v5" - VMSizeStandardD32asV5 VMSize = "Standard_D32as_v5" - VMSizeStandardD64asV5 VMSize = "Standard_D64as_v5" - VMSizeStandardD96asV5 VMSize = "Standard_D96as_v5" - - VMSizeStandardD4dsV5 VMSize = "Standard_D4ds_v5" - VMSizeStandardD8dsV5 VMSize = "Standard_D8ds_v5" - VMSizeStandardD16dsV5 VMSize = "Standard_D16ds_v5" - VMSizeStandardD32dsV5 VMSize = "Standard_D32ds_v5" - VMSizeStandardD48dsV5 VMSize = "Standard_D48ds_v5" - VMSizeStandardD64dsV5 VMSize = "Standard_D64ds_v5" - VMSizeStandardD96dsV5 VMSize = "Standard_D96ds_v5" - - VMSizeStandardD4sV6 VMSize = "Standard_D4s_v6" - VMSizeStandardD8sV6 VMSize = "Standard_D8s_v6" - VMSizeStandardD16sV6 VMSize = "Standard_D16s_v6" - VMSizeStandardD32sV6 VMSize = 
"Standard_D32s_v6" - VMSizeStandardD64sV6 VMSize = "Standard_D64s_v6" - VMSizeStandardD96sV6 VMSize = "Standard_D96s_v6" - - VMSizeStandardD4dsV6 VMSize = "Standard_D4ds_v6" - VMSizeStandardD8dsV6 VMSize = "Standard_D8ds_v6" - VMSizeStandardD16dsV6 VMSize = "Standard_D16ds_v6" - VMSizeStandardD32dsV6 VMSize = "Standard_D32ds_v6" - VMSizeStandardD64dsV6 VMSize = "Standard_D64ds_v6" - VMSizeStandardD96dsV6 VMSize = "Standard_D96ds_v6" - - VMSizeStandardE4sV3 VMSize = "Standard_E4s_v3" - VMSizeStandardE8sV3 VMSize = "Standard_E8s_v3" - VMSizeStandardE16sV3 VMSize = "Standard_E16s_v3" - VMSizeStandardE32sV3 VMSize = "Standard_E32s_v3" - - VMSizeStandardE2sV4 VMSize = "Standard_E2s_v4" - VMSizeStandardE4sV4 VMSize = "Standard_E4s_v4" - VMSizeStandardE8sV4 VMSize = "Standard_E8s_v4" - VMSizeStandardE16sV4 VMSize = "Standard_E16s_v4" - VMSizeStandardE20sV4 VMSize = "Standard_E20s_v4" - VMSizeStandardE32sV4 VMSize = "Standard_E32s_v4" - VMSizeStandardE48sV4 VMSize = "Standard_E48s_v4" - VMSizeStandardE64sV4 VMSize = "Standard_E64s_v4" - - VMSizeStandardE2sV5 VMSize = "Standard_E2s_v5" - VMSizeStandardE4sV5 VMSize = "Standard_E4s_v5" - VMSizeStandardE8sV5 VMSize = "Standard_E8s_v5" - VMSizeStandardE16sV5 VMSize = "Standard_E16s_v5" - VMSizeStandardE20sV5 VMSize = "Standard_E20s_v5" - VMSizeStandardE32sV5 VMSize = "Standard_E32s_v5" - VMSizeStandardE48sV5 VMSize = "Standard_E48s_v5" - VMSizeStandardE64sV5 VMSize = "Standard_E64s_v5" - VMSizeStandardE96sV5 VMSize = "Standard_E96s_v5" - - VMSizeStandardE4asV4 VMSize = "Standard_E4as_v4" - VMSizeStandardE8asV4 VMSize = "Standard_E8as_v4" - VMSizeStandardE16asV4 VMSize = "Standard_E16as_v4" - VMSizeStandardE20asV4 VMSize = "Standard_E20as_v4" - VMSizeStandardE32asV4 VMSize = "Standard_E32as_v4" - VMSizeStandardE48asV4 VMSize = "Standard_E48as_v4" - VMSizeStandardE64asV4 VMSize = "Standard_E64as_v4" - VMSizeStandardE96asV4 VMSize = "Standard_E96as_v4" - - VMSizeStandardE8asV5 VMSize = "Standard_E8as_v5" - VMSizeStandardE16asV5 
VMSize = "Standard_E16as_v5" - VMSizeStandardE20asV5 VMSize = "Standard_E20as_v5" - VMSizeStandardE32asV5 VMSize = "Standard_E32as_v5" - VMSizeStandardE48asV5 VMSize = "Standard_E48as_v5" - VMSizeStandardE64asV5 VMSize = "Standard_E64as_v5" - VMSizeStandardE96asV5 VMSize = "Standard_E96as_v5" - - VMSizeStandardE64isV3 VMSize = "Standard_E64is_v3" - VMSizeStandardE80isV4 VMSize = "Standard_E80is_v4" - VMSizeStandardE80idsV4 VMSize = "Standard_E80ids_v4" - VMSizeStandardE96dsV5 VMSize = "Standard_E96ds_v5" - VMSizeStandardE104isV5 VMSize = "Standard_E104is_v5" - VMSizeStandardE104idsV5 VMSize = "Standard_E104ids_v5" - - VMSizeStandardF4sV2 VMSize = "Standard_F4s_v2" - VMSizeStandardF8sV2 VMSize = "Standard_F8s_v2" - VMSizeStandardF16sV2 VMSize = "Standard_F16s_v2" - VMSizeStandardF32sV2 VMSize = "Standard_F32s_v2" - VMSizeStandardF72sV2 VMSize = "Standard_F72s_v2" - - VMSizeStandardM128ms VMSize = "Standard_M128ms" - - VMSizeStandardL4s VMSize = "Standard_L4s" - VMSizeStandardL8s VMSize = "Standard_L8s" - VMSizeStandardL16s VMSize = "Standard_L16s" - VMSizeStandardL32s VMSize = "Standard_L32s" - - VMSizeStandardL8sV2 VMSize = "Standard_L8s_v2" - VMSizeStandardL16sV2 VMSize = "Standard_L16s_v2" - VMSizeStandardL32sV2 VMSize = "Standard_L32s_v2" - VMSizeStandardL48sV2 VMSize = "Standard_L48s_v2" - VMSizeStandardL64sV2 VMSize = "Standard_L64s_v2" - - VMSizeStandardL8sV3 VMSize = "Standard_L8s_v3" - VMSizeStandardL16sV3 VMSize = "Standard_L16s_v3" - VMSizeStandardL32sV3 VMSize = "Standard_L32s_v3" - VMSizeStandardL48sV3 VMSize = "Standard_L48s_v3" - VMSizeStandardL64sV3 VMSize = "Standard_L64s_v3" - - VMSizeStandardL4sV4 VMSize = "Standard_L4s_v4" - VMSizeStandardL8sV4 VMSize = "Standard_L8s_v4" - VMSizeStandardL16sV4 VMSize = "Standard_L16s_v4" - VMSizeStandardL32sV4 VMSize = "Standard_L32s_v4" - VMSizeStandardL48sV4 VMSize = "Standard_L48s_v4" - VMSizeStandardL64sV4 VMSize = "Standard_L64s_v4" - VMSizeStandardL80sV4 VMSize = "Standard_L80s_v4" - - VMSizeStandardD4lsV6 
VMSize = "Standard_D4ls_v6" - VMSizeStandardD8lsV6 VMSize = "Standard_D8ls_v6" - VMSizeStandardD16lsV6 VMSize = "Standard_D16ls_v6" - VMSizeStandardD32lsV6 VMSize = "Standard_D32ls_v6" - VMSizeStandardD48lsV6 VMSize = "Standard_D48ls_v6" - VMSizeStandardD64lsV6 VMSize = "Standard_D64ls_v6" - VMSizeStandardD96lsV6 VMSize = "Standard_D96ls_v6" - - VMSizeStandardD4ldsV6 VMSize = "Standard_D4lds_v6" - VMSizeStandardD8ldsV6 VMSize = "Standard_D8lds_v6" - VMSizeStandardD16ldsV6 VMSize = "Standard_D1l6ds_v6" - VMSizeStandardD32ldsV6 VMSize = "Standard_D32lds_v6" - VMSizeStandardD48ldsV6 VMSize = "Standard_D48lds_v6" - VMSizeStandardD64ldsV6 VMSize = "Standard_D64lds_v6" - VMSizeStandardD96ldsV6 VMSize = "Standard_D96lds_v6" - - // GPU VMs - VMSizeStandardNC4asT4V3 VMSize = "Standard_NC4as_T4_v3" - VMSizeStandardNC8asT4V3 VMSize = "Standard_NC8as_T4_v3" - VMSizeStandardNC16asT4V3 VMSize = "Standard_NC16as_T4_v3" - VMSizeStandardNC64asT4V3 VMSize = "Standard_NC64as_T4_v3" - - VMSizeStandardNC6sV3 VMSize = "Standard_NC6s_v3" - VMSizeStandardNC12sV3 VMSize = "Standard_NC12s_v3" - VMSizeStandardNC24sV3 VMSize = "Standard_NC24s_v3" - VMSizeStandardNC24rsV3 VMSize = "Standard_NC24rs_v3" -) - // WorkerProfile represents a worker profile. 
type WorkerProfile struct { Name string `json:"name,omitempty"` - VMSize VMSize `json:"vmSize,omitempty"` + VMSize vms.VMSize `json:"vmSize,omitempty"` DiskSizeGB int `json:"diskSizeGB,omitempty"` SubnetID string `json:"subnetId,omitempty"` Count int `json:"count,omitempty"` diff --git a/pkg/api/admin/openshiftcluster_convert.go b/pkg/api/admin/openshiftcluster_convert.go index ee53e80200e..5700d357909 100644 --- a/pkg/api/admin/openshiftcluster_convert.go +++ b/pkg/api/admin/openshiftcluster_convert.go @@ -62,7 +62,7 @@ func (c openShiftClusterConverter) ToExternal(oc *api.OpenShiftCluster) interfac }(), }, MasterProfile: MasterProfile{ - VMSize: VMSize(oc.Properties.MasterProfile.VMSize), + VMSize: oc.Properties.MasterProfile.VMSize, SubnetID: oc.Properties.MasterProfile.SubnetID, EncryptionAtHost: EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost), DiskEncryptionSetID: oc.Properties.MasterProfile.DiskEncryptionSetID, @@ -133,7 +133,7 @@ func (c openShiftClusterConverter) ToExternal(oc *api.OpenShiftCluster) interfac for _, p := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles = append(out.Properties.WorkerProfiles, WorkerProfile{ Name: p.Name, - VMSize: VMSize(p.VMSize), + VMSize: p.VMSize, DiskSizeGB: p.DiskSizeGB, SubnetID: p.SubnetID, Count: p.Count, @@ -148,7 +148,7 @@ func (c openShiftClusterConverter) ToExternal(oc *api.OpenShiftCluster) interfac for _, p := range oc.Properties.WorkerProfilesStatus { out.Properties.WorkerProfilesStatus = append(out.Properties.WorkerProfilesStatus, WorkerProfile{ Name: p.Name, - VMSize: VMSize(p.VMSize), + VMSize: p.VMSize, DiskSizeGB: p.DiskSizeGB, SubnetID: p.SubnetID, Count: p.Count, @@ -370,7 +370,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } } - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = oc.Properties.MasterProfile.VMSize out.Properties.MasterProfile.SubnetID = 
oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -381,7 +381,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = oc.Properties.WorkerProfiles[i].VMSize out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count @@ -394,7 +394,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfilesStatus = make([]api.WorkerProfile, len(oc.Properties.WorkerProfilesStatus)) for i := range oc.Properties.WorkerProfilesStatus { out.Properties.WorkerProfilesStatus[i].Name = oc.Properties.WorkerProfilesStatus[i].Name - out.Properties.WorkerProfilesStatus[i].VMSize = api.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) + out.Properties.WorkerProfilesStatus[i].VMSize = oc.Properties.WorkerProfilesStatus[i].VMSize out.Properties.WorkerProfilesStatus[i].DiskSizeGB = oc.Properties.WorkerProfilesStatus[i].DiskSizeGB out.Properties.WorkerProfilesStatus[i].SubnetID = oc.Properties.WorkerProfilesStatus[i].SubnetID out.Properties.WorkerProfilesStatus[i].Count = oc.Properties.WorkerProfilesStatus[i].Count diff --git a/pkg/api/admin/openshiftcluster_validatestatic.go b/pkg/api/admin/openshiftcluster_validatestatic.go index 1a5dd61a879..18141e42821 100644 --- 
a/pkg/api/admin/openshiftcluster_validatestatic.go +++ b/pkg/api/admin/openshiftcluster_validatestatic.go @@ -14,7 +14,7 @@ import ( type openShiftClusterStaticValidator struct{} // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location string, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { if _current == nil { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeRequestNotAllowed, "", "Admin API does not allow cluster creation.") } diff --git a/pkg/api/admin/openshiftcluster_validatestatic_test.go b/pkg/api/admin/openshiftcluster_validatestatic_test.go index 9cf8803614f..40dc61af6cf 100644 --- a/pkg/api/admin/openshiftcluster_validatestatic_test.go +++ b/pkg/api/admin/openshiftcluster_validatestatic_test.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/ARO-RP/pkg/api/test/validate" "github.com/Azure/ARO-RP/pkg/api/util/pointerutils" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) func TestOpenShiftClusterStaticValidateDelta(t *testing.T) { @@ -466,13 +467,13 @@ func TestOpenShiftClusterStaticValidateDelta(t *testing.T) { return &OpenShiftCluster{ Properties: OpenShiftClusterProperties{ MasterProfile: MasterProfile{ - VMSize: VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, }, } }, modify: func(oc *OpenShiftCluster) { - oc.Properties.MasterProfile.VMSize = VMSizeStandardD4sV3 + oc.Properties.MasterProfile.VMSize = vms.VMSizeStandardD4sV3 }, wantErr: "400: PropertyChangeNotAllowed: properties.masterProfile.vmSize: Changing property 'properties.masterProfile.vmSize' is not allowed.", }, @@ -484,13 +485,13 @@ func 
TestOpenShiftClusterStaticValidateDelta(t *testing.T) { WorkerProfiles: []WorkerProfile{ { Name: "worker", - VMSize: VMSizeStandardD2sV3, + VMSize: vms.VMSizeStandardD2sV3, }, }, }, } }, - modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = VMSizeStandardD4sV3 }, + modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = vms.VMSizeStandardD4sV3 }, wantErr: "400: PropertyChangeNotAllowed: properties.workerProfiles['worker'].vmSize: Changing property 'properties.workerProfiles['worker'].vmSize' is not allowed.", }, { @@ -788,7 +789,7 @@ func TestOpenShiftClusterStaticValidateDelta(t *testing.T) { (&openShiftClusterConverter{}).ToInternal(tt.oc(), current) v := &openShiftClusterStaticValidator{} - err := v.Static(oc, current, "", "", true, api.ArchitectureVersionV2, "") + err := v.Static(oc, current, false, "", "", api.ArchitectureVersionV2, "") if err == nil { if tt.wantErr != "" { t.Error(err) diff --git a/pkg/api/openshiftcluster.go b/pkg/api/openshiftcluster.go index 18f44116a41..1b6665850cc 100644 --- a/pkg/api/openshiftcluster.go +++ b/pkg/api/openshiftcluster.go @@ -9,6 +9,8 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) // OpenShiftCluster represents an OpenShift cluster @@ -441,403 +443,17 @@ const ( type MasterProfile struct { MissingFields - VMSize VMSize `json:"vmSize,omitempty"` + VMSize vms.VMSize `json:"vmSize,omitempty"` SubnetID string `json:"subnetId,omitempty"` EncryptionAtHost EncryptionAtHost `json:"encryptionAtHost,omitempty"` DiskEncryptionSetID string `json:"diskEncryptionSetId,omitempty"` } - -// VMSize represents a VM size -type VMSize string - -func (vmSize VMSize) String() string { - return string(vmSize) -} - -// VMSize constants -// add required resources in pkg/validate/dynamic/quota.go when adding a new VMSize -const ( - VMSizeStandardD2sV3 VMSize = "Standard_D2s_v3" - VMSizeStandardD4sV3 VMSize = "Standard_D4s_v3" - 
VMSizeStandardD8sV3 VMSize = "Standard_D8s_v3" - VMSizeStandardD16sV3 VMSize = "Standard_D16s_v3" - VMSizeStandardD32sV3 VMSize = "Standard_D32s_v3" - - VMSizeStandardD2sV4 VMSize = "Standard_D2s_v4" - VMSizeStandardD4sV4 VMSize = "Standard_D4s_v4" - VMSizeStandardD8sV4 VMSize = "Standard_D8s_v4" - VMSizeStandardD16sV4 VMSize = "Standard_D16s_v4" - VMSizeStandardD32sV4 VMSize = "Standard_D32s_v4" - VMSizeStandardD64sV4 VMSize = "Standard_D64s_v4" - - VMSizeStandardD2sV5 VMSize = "Standard_D2s_v5" - VMSizeStandardD4sV5 VMSize = "Standard_D4s_v5" - VMSizeStandardD8sV5 VMSize = "Standard_D8s_v5" - VMSizeStandardD16sV5 VMSize = "Standard_D16s_v5" - VMSizeStandardD32sV5 VMSize = "Standard_D32s_v5" - VMSizeStandardD64sV5 VMSize = "Standard_D64s_v5" - VMSizeStandardD96sV5 VMSize = "Standard_D96s_v5" - - VMSizeStandardD4asV4 VMSize = "Standard_D4as_v4" - VMSizeStandardD8asV4 VMSize = "Standard_D8as_v4" - VMSizeStandardD16asV4 VMSize = "Standard_D16as_v4" - VMSizeStandardD32asV4 VMSize = "Standard_D32as_v4" - VMSizeStandardD64asV4 VMSize = "Standard_D64as_v4" - VMSizeStandardD96asV4 VMSize = "Standard_D96as_v4" - - VMSizeStandardD4asV5 VMSize = "Standard_D4as_v5" - VMSizeStandardD8asV5 VMSize = "Standard_D8as_v5" - VMSizeStandardD16asV5 VMSize = "Standard_D16as_v5" - VMSizeStandardD32asV5 VMSize = "Standard_D32as_v5" - VMSizeStandardD64asV5 VMSize = "Standard_D64as_v5" - VMSizeStandardD96asV5 VMSize = "Standard_D96as_v5" - - VMSizeStandardD4dsV5 VMSize = "Standard_D4ds_v5" - VMSizeStandardD8dsV5 VMSize = "Standard_D8ds_v5" - VMSizeStandardD16dsV5 VMSize = "Standard_D16ds_v5" - VMSizeStandardD32dsV5 VMSize = "Standard_D32ds_v5" - VMSizeStandardD48dsV5 VMSize = "Standard_D48ds_v5" - VMSizeStandardD64dsV5 VMSize = "Standard_D64ds_v5" - VMSizeStandardD96dsV5 VMSize = "Standard_D96ds_v5" - - VMSizeStandardD4sV6 VMSize = "Standard_D4s_v6" - VMSizeStandardD8sV6 VMSize = "Standard_D8s_v6" - VMSizeStandardD16sV6 VMSize = "Standard_D16s_v6" - VMSizeStandardD32sV6 VMSize = 
"Standard_D32s_v6" - VMSizeStandardD48sV6 VMSize = "Standard_D48s_v6" - VMSizeStandardD64sV6 VMSize = "Standard_D64s_v6" - VMSizeStandardD96sV6 VMSize = "Standard_D96s_v6" - - VMSizeStandardD4dsV6 VMSize = "Standard_D4ds_v6" - VMSizeStandardD8dsV6 VMSize = "Standard_D8ds_v6" - VMSizeStandardD16dsV6 VMSize = "Standard_D16ds_v6" - VMSizeStandardD32dsV6 VMSize = "Standard_D32ds_v6" - VMSizeStandardD48dsV6 VMSize = "Standard_D48ds_v6" - VMSizeStandardD64dsV6 VMSize = "Standard_D64ds_v6" - VMSizeStandardD96dsV6 VMSize = "Standard_D96ds_v6" - - VMSizeStandardE4sV3 VMSize = "Standard_E4s_v3" - VMSizeStandardE8sV3 VMSize = "Standard_E8s_v3" - VMSizeStandardE16sV3 VMSize = "Standard_E16s_v3" - VMSizeStandardE32sV3 VMSize = "Standard_E32s_v3" - - VMSizeStandardE2sV4 VMSize = "Standard_E2s_v4" - VMSizeStandardE4sV4 VMSize = "Standard_E4s_v4" - VMSizeStandardE8sV4 VMSize = "Standard_E8s_v4" - VMSizeStandardE16sV4 VMSize = "Standard_E16s_v4" - VMSizeStandardE20sV4 VMSize = "Standard_E20s_v4" - VMSizeStandardE32sV4 VMSize = "Standard_E32s_v4" - VMSizeStandardE48sV4 VMSize = "Standard_E48s_v4" - VMSizeStandardE64sV4 VMSize = "Standard_E64s_v4" - - VMSizeStandardE2sV5 VMSize = "Standard_E2s_v5" - VMSizeStandardE4sV5 VMSize = "Standard_E4s_v5" - VMSizeStandardE8sV5 VMSize = "Standard_E8s_v5" - VMSizeStandardE16sV5 VMSize = "Standard_E16s_v5" - VMSizeStandardE20sV5 VMSize = "Standard_E20s_v5" - VMSizeStandardE32sV5 VMSize = "Standard_E32s_v5" - VMSizeStandardE48sV5 VMSize = "Standard_E48s_v5" - VMSizeStandardE64sV5 VMSize = "Standard_E64s_v5" - VMSizeStandardE96sV5 VMSize = "Standard_E96s_v5" - - VMSizeStandardE4asV4 VMSize = "Standard_E4as_v4" - VMSizeStandardE8asV4 VMSize = "Standard_E8as_v4" - VMSizeStandardE16asV4 VMSize = "Standard_E16as_v4" - VMSizeStandardE20asV4 VMSize = "Standard_E20as_v4" - VMSizeStandardE32asV4 VMSize = "Standard_E32as_v4" - VMSizeStandardE48asV4 VMSize = "Standard_E48as_v4" - VMSizeStandardE64asV4 VMSize = "Standard_E64as_v4" - VMSizeStandardE96asV4 
VMSize = "Standard_E96as_v4" - - VMSizeStandardE8asV5 VMSize = "Standard_E8as_v5" - VMSizeStandardE16asV5 VMSize = "Standard_E16as_v5" - VMSizeStandardE20asV5 VMSize = "Standard_E20as_v5" - VMSizeStandardE32asV5 VMSize = "Standard_E32as_v5" - VMSizeStandardE48asV5 VMSize = "Standard_E48as_v5" - VMSizeStandardE64asV5 VMSize = "Standard_E64as_v5" - VMSizeStandardE96asV5 VMSize = "Standard_E96as_v5" - - VMSizeStandardE64isV3 VMSize = "Standard_E64is_v3" - VMSizeStandardE80isV4 VMSize = "Standard_E80is_v4" - VMSizeStandardE80idsV4 VMSize = "Standard_E80ids_v4" - VMSizeStandardE96dsV5 VMSize = "Standard_E96ds_v5" - VMSizeStandardE104isV5 VMSize = "Standard_E104is_v5" - VMSizeStandardE104idsV5 VMSize = "Standard_E104ids_v5" - - VMSizeStandardF4sV2 VMSize = "Standard_F4s_v2" - VMSizeStandardF8sV2 VMSize = "Standard_F8s_v2" - VMSizeStandardF16sV2 VMSize = "Standard_F16s_v2" - VMSizeStandardF32sV2 VMSize = "Standard_F32s_v2" - VMSizeStandardF72sV2 VMSize = "Standard_F72s_v2" - - VMSizeStandardM128ms VMSize = "Standard_M128ms" - - VMSizeStandardL4s VMSize = "Standard_L4s" - VMSizeStandardL8s VMSize = "Standard_L8s" - VMSizeStandardL16s VMSize = "Standard_L16s" - VMSizeStandardL32s VMSize = "Standard_L32s" - - VMSizeStandardL8sV2 VMSize = "Standard_L8s_v2" - VMSizeStandardL16sV2 VMSize = "Standard_L16s_v2" - VMSizeStandardL32sV2 VMSize = "Standard_L32s_v2" - VMSizeStandardL48sV2 VMSize = "Standard_L48s_v2" - VMSizeStandardL64sV2 VMSize = "Standard_L64s_v2" - - VMSizeStandardL8sV3 VMSize = "Standard_L8s_v3" - VMSizeStandardL16sV3 VMSize = "Standard_L16s_v3" - VMSizeStandardL32sV3 VMSize = "Standard_L32s_v3" - VMSizeStandardL48sV3 VMSize = "Standard_L48s_v3" - VMSizeStandardL64sV3 VMSize = "Standard_L64s_v3" - - VMSizeStandardL4sV4 VMSize = "Standard_L4s_v4" - VMSizeStandardL8sV4 VMSize = "Standard_L8s_v4" - VMSizeStandardL16sV4 VMSize = "Standard_L16s_v4" - VMSizeStandardL32sV4 VMSize = "Standard_L32s_v4" - VMSizeStandardL48sV4 VMSize = "Standard_L48s_v4" - 
VMSizeStandardL64sV4 VMSize = "Standard_L64s_v4" - VMSizeStandardL80sV4 VMSize = "Standard_L80s_v4" - - VMSizeStandardD4lsV6 VMSize = "Standard_D4ls_v6" - VMSizeStandardD8lsV6 VMSize = "Standard_D8ls_v6" - VMSizeStandardD16lsV6 VMSize = "Standard_D16ls_v6" - VMSizeStandardD32lsV6 VMSize = "Standard_D32ls_v6" - VMSizeStandardD48lsV6 VMSize = "Standard_D48ls_v6" - VMSizeStandardD64lsV6 VMSize = "Standard_D64ls_v6" - VMSizeStandardD96lsV6 VMSize = "Standard_D96ls_v6" - - VMSizeStandardD4ldsV6 VMSize = "Standard_D4lds_v6" - VMSizeStandardD8ldsV6 VMSize = "Standard_D8lds_v6" - VMSizeStandardD16ldsV6 VMSize = "Standard_D16lds_v6" - VMSizeStandardD32ldsV6 VMSize = "Standard_D32lds_v6" - VMSizeStandardD48ldsV6 VMSize = "Standard_D48lds_v6" - VMSizeStandardD64ldsV6 VMSize = "Standard_D64lds_v6" - VMSizeStandardD96ldsV6 VMSize = "Standard_D96lds_v6" - - // GPU VMs - VMSizeStandardNC4asT4V3 VMSize = "Standard_NC4as_T4_v3" - VMSizeStandardNC8asT4V3 VMSize = "Standard_NC8as_T4_v3" - VMSizeStandardNC16asT4V3 VMSize = "Standard_NC16as_T4_v3" - VMSizeStandardNC64asT4V3 VMSize = "Standard_NC64as_T4_v3" - - VMSizeStandardNC6sV3 VMSize = "Standard_NC6s_v3" - VMSizeStandardNC12sV3 VMSize = "Standard_NC12s_v3" - VMSizeStandardNC24sV3 VMSize = "Standard_NC24s_v3" - VMSizeStandardNC24rsV3 VMSize = "Standard_NC24rs_v3" -) - -type VMSizeStruct struct { - CoreCount int `json:"coreCount,omitempty"` - Family string `json:"family,omitempty"` -} - -var ( - VMSizeStandardD2sV3Struct = VMSizeStruct{CoreCount: 2, Family: standardDSv3} - VMSizeStandardD4sV3Struct = VMSizeStruct{CoreCount: 4, Family: standardDSv3} - VMSizeStandardD8sV3Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv3} - VMSizeStandardD16sV3Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv3} - VMSizeStandardD32sV3Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv3} - - VMSizeStandardD2sV4Struct = VMSizeStruct{CoreCount: 2, Family: standardDSv4} - VMSizeStandardD4sV4Struct = VMSizeStruct{CoreCount: 4, Family: 
standardDSv4} - VMSizeStandardD8sV4Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv4} - VMSizeStandardD16sV4Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv4} - VMSizeStandardD32sV4Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv4} - VMSizeStandardD64sV4Struct = VMSizeStruct{CoreCount: 64, Family: standardDSv4} - - VMSizeStandardD2sV5Struct = VMSizeStruct{CoreCount: 2, Family: standardDSv5} - VMSizeStandardD4sV5Struct = VMSizeStruct{CoreCount: 4, Family: standardDSv5} - VMSizeStandardD8sV5Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv5} - VMSizeStandardD16sV5Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv5} - VMSizeStandardD32sV5Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv5} - VMSizeStandardD64sV5Struct = VMSizeStruct{CoreCount: 64, Family: standardDSv5} - VMSizeStandardD96sV5Struct = VMSizeStruct{CoreCount: 96, Family: standardDSv5} - - VMSizeStandardD4asV4Struct = VMSizeStruct{CoreCount: 4, Family: standardDASv4} - VMSizeStandardD8asV4Struct = VMSizeStruct{CoreCount: 8, Family: standardDASv4} - VMSizeStandardD16asV4Struct = VMSizeStruct{CoreCount: 16, Family: standardDASv4} - VMSizeStandardD32asV4Struct = VMSizeStruct{CoreCount: 32, Family: standardDASv4} - VMSizeStandardD64asV4Struct = VMSizeStruct{CoreCount: 64, Family: standardDASv4} - VMSizeStandardD96asV4Struct = VMSizeStruct{CoreCount: 96, Family: standardDASv4} - - VMSizeStandardD4asV5Struct = VMSizeStruct{CoreCount: 4, Family: standardDASv5} - VMSizeStandardD8asV5Struct = VMSizeStruct{CoreCount: 8, Family: standardDASv5} - VMSizeStandardD16asV5Struct = VMSizeStruct{CoreCount: 16, Family: standardDASv5} - VMSizeStandardD32asV5Struct = VMSizeStruct{CoreCount: 32, Family: standardDASv5} - VMSizeStandardD64asV5Struct = VMSizeStruct{CoreCount: 64, Family: standardDASv5} - VMSizeStandardD96asV5Struct = VMSizeStruct{CoreCount: 96, Family: standardDASv5} - - VMSizeStandardD4dsV5Struct = VMSizeStruct{CoreCount: 4, Family: standardDDSv5} - 
VMSizeStandardD8dsV5Struct = VMSizeStruct{CoreCount: 8, Family: standardDDSv5} - VMSizeStandardD16dsV5Struct = VMSizeStruct{CoreCount: 16, Family: standardDDSv5} - VMSizeStandardD32dsV5Struct = VMSizeStruct{CoreCount: 32, Family: standardDDSv5} - VMSizeStandardD48dsV5Struct = VMSizeStruct{CoreCount: 48, Family: standardDDSv5} - VMSizeStandardD64dsV5Struct = VMSizeStruct{CoreCount: 64, Family: standardDDSv5} - VMSizeStandardD96dsV5Struct = VMSizeStruct{CoreCount: 96, Family: standardDDSv5} - - VMSizeStandardD4sV6Struct = VMSizeStruct{CoreCount: 4, Family: standardDSv6} - VMSizeStandardD8sV6Struct = VMSizeStruct{CoreCount: 8, Family: standardDSv6} - VMSizeStandardD16sV6Struct = VMSizeStruct{CoreCount: 16, Family: standardDSv6} - VMSizeStandardD32sV6Struct = VMSizeStruct{CoreCount: 32, Family: standardDSv6} - VMSizeStandardD48sV6Struct = VMSizeStruct{CoreCount: 48, Family: standardDSv6} - VMSizeStandardD64sV6Struct = VMSizeStruct{CoreCount: 64, Family: standardDSv6} - VMSizeStandardD96sV6Struct = VMSizeStruct{CoreCount: 96, Family: standardDSv6} - - VMSizeStandardD4dsV6Struct = VMSizeStruct{CoreCount: 4, Family: standardDDSv6} - VMSizeStandardD8dsV6Struct = VMSizeStruct{CoreCount: 8, Family: standardDDSv6} - VMSizeStandardD16dsV6Struct = VMSizeStruct{CoreCount: 16, Family: standardDDSv6} - VMSizeStandardD32dsV6Struct = VMSizeStruct{CoreCount: 32, Family: standardDDSv6} - VMSizeStandardD48dsV6Struct = VMSizeStruct{CoreCount: 48, Family: standardDDSv6} - VMSizeStandardD64dsV6Struct = VMSizeStruct{CoreCount: 64, Family: standardDDSv6} - VMSizeStandardD96dsV6Struct = VMSizeStruct{CoreCount: 96, Family: standardDDSv6} - - VMSizeStandardE4sV3Struct = VMSizeStruct{CoreCount: 4, Family: standardESv3} - VMSizeStandardE8sV3Struct = VMSizeStruct{CoreCount: 8, Family: standardESv3} - VMSizeStandardE16sV3Struct = VMSizeStruct{CoreCount: 16, Family: standardESv3} - VMSizeStandardE32sV3Struct = VMSizeStruct{CoreCount: 32, Family: standardESv3} - - VMSizeStandardE2sV4Struct = 
VMSizeStruct{CoreCount: 2, Family: standardESv4} - VMSizeStandardE4sV4Struct = VMSizeStruct{CoreCount: 4, Family: standardESv4} - VMSizeStandardE8sV4Struct = VMSizeStruct{CoreCount: 8, Family: standardESv4} - VMSizeStandardE16sV4Struct = VMSizeStruct{CoreCount: 16, Family: standardESv4} - VMSizeStandardE20sV4Struct = VMSizeStruct{CoreCount: 20, Family: standardESv4} - VMSizeStandardE32sV4Struct = VMSizeStruct{CoreCount: 32, Family: standardESv4} - VMSizeStandardE48sV4Struct = VMSizeStruct{CoreCount: 48, Family: standardESv4} - VMSizeStandardE64sV4Struct = VMSizeStruct{CoreCount: 64, Family: standardESv4} - - VMSizeStandardE2sV5Struct = VMSizeStruct{CoreCount: 2, Family: standardESv5} - VMSizeStandardE4sV5Struct = VMSizeStruct{CoreCount: 4, Family: standardESv5} - VMSizeStandardE8sV5Struct = VMSizeStruct{CoreCount: 8, Family: standardESv5} - VMSizeStandardE16sV5Struct = VMSizeStruct{CoreCount: 16, Family: standardESv5} - VMSizeStandardE20sV5Struct = VMSizeStruct{CoreCount: 20, Family: standardESv5} - VMSizeStandardE32sV5Struct = VMSizeStruct{CoreCount: 32, Family: standardESv5} - VMSizeStandardE48sV5Struct = VMSizeStruct{CoreCount: 48, Family: standardESv5} - VMSizeStandardE64sV5Struct = VMSizeStruct{CoreCount: 64, Family: standardESv5} - VMSizeStandardE96sV5Struct = VMSizeStruct{CoreCount: 96, Family: standardESv5} - - VMSizeStandardE4asV4Struct = VMSizeStruct{CoreCount: 4, Family: standardEASv4} - VMSizeStandardE8asV4Struct = VMSizeStruct{CoreCount: 8, Family: standardEASv4} - VMSizeStandardE16asV4Struct = VMSizeStruct{CoreCount: 16, Family: standardEASv4} - VMSizeStandardE20asV4Struct = VMSizeStruct{CoreCount: 20, Family: standardEASv4} - VMSizeStandardE32asV4Struct = VMSizeStruct{CoreCount: 32, Family: standardEASv4} - VMSizeStandardE48asV4Struct = VMSizeStruct{CoreCount: 48, Family: standardEASv4} - VMSizeStandardE64asV4Struct = VMSizeStruct{CoreCount: 64, Family: standardEASv4} - VMSizeStandardE96asV4Struct = VMSizeStruct{CoreCount: 96, Family: standardEASv4} 
- - VMSizeStandardE8asV5Struct = VMSizeStruct{CoreCount: 8, Family: standardEASv5} - VMSizeStandardE16asV5Struct = VMSizeStruct{CoreCount: 16, Family: standardEASv5} - VMSizeStandardE20asV5Struct = VMSizeStruct{CoreCount: 20, Family: standardEASv5} - VMSizeStandardE32asV5Struct = VMSizeStruct{CoreCount: 32, Family: standardEASv5} - VMSizeStandardE48asV5Struct = VMSizeStruct{CoreCount: 48, Family: standardEASv5} - VMSizeStandardE64asV5Struct = VMSizeStruct{CoreCount: 64, Family: standardEASv5} - VMSizeStandardE96asV5Struct = VMSizeStruct{CoreCount: 96, Family: standardEASv5} - - VMSizeStandardE64isV3Struct = VMSizeStruct{CoreCount: 64, Family: standardESv3} - VMSizeStandardE80isV4Struct = VMSizeStruct{CoreCount: 80, Family: standardEISv4} - VMSizeStandardE80idsV4Struct = VMSizeStruct{CoreCount: 80, Family: standardEIDSv4} - VMSizeStandardE96dsV5Struct = VMSizeStruct{CoreCount: 96, Family: standardEDSv5} - VMSizeStandardE104isV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEISv5} - VMSizeStandardE104idsV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEIDSv5} - - VMSizeStandardF4sV2Struct = VMSizeStruct{CoreCount: 4, Family: standardFSv2} - VMSizeStandardF8sV2Struct = VMSizeStruct{CoreCount: 8, Family: standardFSv2} - VMSizeStandardF16sV2Struct = VMSizeStruct{CoreCount: 16, Family: standardFSv2} - VMSizeStandardF32sV2Struct = VMSizeStruct{CoreCount: 32, Family: standardFSv2} - VMSizeStandardF72sV2Struct = VMSizeStruct{CoreCount: 72, Family: standardFSv2} - - VMSizeStandardM128msStruct = VMSizeStruct{CoreCount: 128, Family: standardMS} - - VMSizeStandardL4sStruct = VMSizeStruct{CoreCount: 4, Family: standardLSv2} - VMSizeStandardL8sStruct = VMSizeStruct{CoreCount: 8, Family: standardLSv2} - VMSizeStandardL16sStruct = VMSizeStruct{CoreCount: 16, Family: standardLSv2} - VMSizeStandardL32sStruct = VMSizeStruct{CoreCount: 32, Family: standardLSv2} - - VMSizeStandardL8sV2Struct = VMSizeStruct{CoreCount: 8, Family: standardLSv2} - VMSizeStandardL16sV2Struct = 
VMSizeStruct{CoreCount: 16, Family: standardLSv2} - VMSizeStandardL32sV2Struct = VMSizeStruct{CoreCount: 32, Family: standardLSv2} - VMSizeStandardL48sV2Struct = VMSizeStruct{CoreCount: 48, Family: standardLSv2} - VMSizeStandardL64sV2Struct = VMSizeStruct{CoreCount: 64, Family: standardLSv2} - - VMSizeStandardL8sV3Struct = VMSizeStruct{CoreCount: 8, Family: standardLSv3} - VMSizeStandardL16sV3Struct = VMSizeStruct{CoreCount: 16, Family: standardLSv3} - VMSizeStandardL32sV3Struct = VMSizeStruct{CoreCount: 32, Family: standardLSv3} - VMSizeStandardL48sV3Struct = VMSizeStruct{CoreCount: 48, Family: standardLSv3} - VMSizeStandardL64sV3Struct = VMSizeStruct{CoreCount: 64, Family: standardLSv3} - - VMSizeStandardL4sV4Struct = VMSizeStruct{CoreCount: 4, Family: standardLSv4} - VMSizeStandardL8sV4Struct = VMSizeStruct{CoreCount: 8, Family: standardLSv4} - VMSizeStandardL16sV4Struct = VMSizeStruct{CoreCount: 16, Family: standardLSv4} - VMSizeStandardL32sV4Struct = VMSizeStruct{CoreCount: 32, Family: standardLSv4} - VMSizeStandardL48sV4Struct = VMSizeStruct{CoreCount: 48, Family: standardLSv4} - VMSizeStandardL64sV4Struct = VMSizeStruct{CoreCount: 64, Family: standardLSv4} - VMSizeStandardL80sV4Struct = VMSizeStruct{CoreCount: 80, Family: standardLSv4} - - VMSizeStandardD4lsV6Struct = VMSizeStruct{CoreCount: 4, Family: standardDLSv6} - VMSizeStandardD8lsV6Struct = VMSizeStruct{CoreCount: 8, Family: standardDLSv6} - VMSizeStandardD16lsV6Struct = VMSizeStruct{CoreCount: 16, Family: standardDLSv6} - VMSizeStandardD32lsV6Struct = VMSizeStruct{CoreCount: 32, Family: standardDLSv6} - VMSizeStandardD48lsV6Struct = VMSizeStruct{CoreCount: 48, Family: standardDLSv6} - VMSizeStandardD64lsV6Struct = VMSizeStruct{CoreCount: 64, Family: standardDLSv6} - VMSizeStandardD96lsV6Struct = VMSizeStruct{CoreCount: 96, Family: standardDLSv6} - - VMSizeStandardD4ldsV6Struct = VMSizeStruct{CoreCount: 4, Family: standardDLDSv6} - VMSizeStandardD8ldsV6Struct = VMSizeStruct{CoreCount: 8, Family: 
standardDLDSv6} - VMSizeStandardD16ldsV6Struct = VMSizeStruct{CoreCount: 16, Family: standardDLDSv6} - VMSizeStandardD32ldsV6Struct = VMSizeStruct{CoreCount: 32, Family: standardDLDSv6} - VMSizeStandardD48ldsV6Struct = VMSizeStruct{CoreCount: 48, Family: standardDLDSv6} - VMSizeStandardD64ldsV6Struct = VMSizeStruct{CoreCount: 64, Family: standardDLDSv6} - VMSizeStandardD96ldsV6Struct = VMSizeStruct{CoreCount: 96, Family: standardDLDSv6} - - // GPU nodes - // the formatting of the ncasv3_t4 family is different. This can be seen through a - // az vm list-usage -l eastus - VMSizeStandardNC4asT4V3Struct = VMSizeStruct{CoreCount: 4, Family: standardNCAS} - VMSizeStandardNC8asT4V3Struct = VMSizeStruct{CoreCount: 8, Family: standardNCAS} - VMSizeStandardNC16asT4V3Struct = VMSizeStruct{CoreCount: 16, Family: standardNCAS} - VMSizeStandardNC64asT4V3Struct = VMSizeStruct{CoreCount: 64, Family: standardNCAS} - - VMSizeStandardNC6sV3Struct = VMSizeStruct{CoreCount: 6, Family: standardNCSv3} - VMSizeStandardNC12sV3Struct = VMSizeStruct{CoreCount: 12, Family: standardNCSv3} - VMSizeStandardNC24sV3Struct = VMSizeStruct{CoreCount: 24, Family: standardNCSv3} - VMSizeStandardNC24rsV3Struct = VMSizeStruct{CoreCount: 24, Family: standardNCSv3} -) - -const ( - standardDSv3 = "standardDSv3Family" - standardDSv4 = "standardDSv4Family" - standardDSv5 = "standardDSv5Family" - standardDSv6 = "standardDSv6Family" - standardDASv4 = "standardDASv4Family" - standardDASv5 = "standardDASv5Family" - standardDDSv5 = "standardDDSv5Family" - standardDDSv6 = "standardDDSv6Family" - standardESv3 = "standardESv3Family" - standardESv4 = "standardESv4Family" - standardESv5 = "standardESv5Family" - standardEASv4 = "standardEASv4Family" - standardEASv5 = "standardEASv5Family" - standardEISv4 = "standardEISv4Family" - standardEIDSv4 = "standardEIDSv4Family" - standardEISv5 = "standardEISv5Family" - standardEDSv5 = "standardEDSv5Family" - standardEIDSv5 = "standardEIDSv5Family" - 
standardEIDv5 = "standardEIDv5Family" - standardFSv2 = "standardFSv2Family" - standardMS = "standardMSFamily" - standardLSv2 = "standardLsv2Family" - standardLSv3 = "standardLsv3Family" - standardLSv4 = "standardLsv4Family" - standardDLSv6 = "standardDLSv6Family" - standardDLDSv6 = "standardDLDSv6Family" - standardNCAS = "Standard NCASv3_T4 Family" - standardNCSv3 = "Standard NCSv3 Family" -) - // WorkerProfile represents a worker profile type WorkerProfile struct { MissingFields Name string `json:"name,omitempty"` - VMSize VMSize `json:"vmSize,omitempty"` + VMSize vms.VMSize `json:"vmSize,omitempty"` DiskSizeGB int `json:"diskSizeGB,omitempty"` SubnetID string `json:"subnetId,omitempty"` Count int `json:"count,omitempty"` diff --git a/pkg/api/openshiftclusterdocument_example.go b/pkg/api/openshiftclusterdocument_example.go index d66dee463e6..b1b099c2301 100644 --- a/pkg/api/openshiftclusterdocument_example.go +++ b/pkg/api/openshiftclusterdocument_example.go @@ -1,6 +1,10 @@ package api -import "time" +import ( + "time" + + "github.com/Azure/ARO-RP/pkg/api/util/vms" +) // Copyright (c) Microsoft Corporation. // Licensed under the Apache License 2.0. 
@@ -61,13 +65,13 @@ func ExampleOpenShiftClusterDocument() *OpenShiftClusterDocument { PreconfiguredNSG: PreconfiguredNSGDisabled, }, MasterProfile: MasterProfile{ - VMSize: VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, SubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnetResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/master", }, WorkerProfiles: []WorkerProfile{ { Name: "worker", - VMSize: VMSizeStandardD2sV3, + VMSize: vms.VMSizeStandardD2sV3, DiskSizeGB: 128, SubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnetResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/worker", Count: 3, @@ -76,21 +80,21 @@ func ExampleOpenShiftClusterDocument() *OpenShiftClusterDocument { WorkerProfilesStatus: []WorkerProfile{ { Name: "worker1", - VMSize: VMSizeStandardD2sV3, + VMSize: vms.VMSizeStandardD2sV3, DiskSizeGB: 128, SubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnetResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/worker", Count: 1, }, { Name: "worker2", - VMSize: VMSizeStandardD2sV3, + VMSize: vms.VMSizeStandardD2sV3, DiskSizeGB: 128, SubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnetResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/worker", Count: 1, }, { Name: "worker3", - VMSize: VMSizeStandardD2sV3, + VMSize: vms.VMSizeStandardD2sV3, DiskSizeGB: 128, SubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/vnetResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/worker", Count: 1, diff --git a/pkg/api/register.go b/pkg/api/register.go index 8c747320e1e..1c107d19d54 100644 --- a/pkg/api/register.go +++ b/pkg/api/register.go @@ -13,7 +13,7 @@ type OpenShiftClusterConverter interface { } type OpenShiftClusterStaticValidator interface { - Static(interface{}, *OpenShiftCluster, string, string, bool, ArchitectureVersion, 
string) error + Static(any, *OpenShiftCluster, bool, string, string, ArchitectureVersion, string) error } type OpenShiftClusterCredentialsConverter interface { diff --git a/pkg/api/util/vms/sizes.go b/pkg/api/util/vms/sizes.go index aeeb123d6e5..65182861ac5 100644 --- a/pkg/api/util/vms/sizes.go +++ b/pkg/api/util/vms/sizes.go @@ -147,6 +147,7 @@ var SupportedWorkerVMSizes = map[VMSize]VMSizeStruct{ VMSizeStandardD8dsV5: vmSizeStandardD8dsV5Struct, VMSizeStandardD16dsV5: vmSizeStandardD16dsV5Struct, VMSizeStandardD32dsV5: vmSizeStandardD32dsV5Struct, + VMSizeStandardD48dsV5: vmSizeStandardD48dsV5Struct, VMSizeStandardD64dsV5: vmSizeStandardD64dsV5Struct, VMSizeStandardD96dsV5: vmSizeStandardD96dsV5Struct, @@ -195,6 +196,7 @@ var SupportedWorkerVMSizes = map[VMSize]VMSizeStruct{ VMSizeStandardE64isV3: vmSizeStandardE64isV3Struct, VMSizeStandardE80isV4: vmSizeStandardE80isV4Struct, VMSizeStandardE80idsV4: vmSizeStandardE80idsV4Struct, + VMSizeStandardE96dsV5: vmSizeStandardE96dsV5Struct, VMSizeStandardE104isV5: vmSizeStandardE104isV5Struct, VMSizeStandardE104idsV5: vmSizeStandardE104idsV5Struct, diff --git a/pkg/api/util/vms/types.go b/pkg/api/util/vms/types.go index d98d88b96f6..ade5c22a20a 100644 --- a/pkg/api/util/vms/types.go +++ b/pkg/api/util/vms/types.go @@ -66,6 +66,7 @@ const ( VMSizeStandardD8dsV5 VMSize = "Standard_D8ds_v5" VMSizeStandardD16dsV5 VMSize = "Standard_D16ds_v5" VMSizeStandardD32dsV5 VMSize = "Standard_D32ds_v5" + VMSizeStandardD48dsV5 VMSize = "Standard_D48ds_v5" VMSizeStandardD64dsV5 VMSize = "Standard_D64ds_v5" VMSizeStandardD96dsV5 VMSize = "Standard_D96ds_v5" @@ -266,6 +267,7 @@ var ( vmSizeStandardD8dsV5Struct = VMSizeStruct{CoreCount: 8, Family: standardDDSv5} vmSizeStandardD16dsV5Struct = VMSizeStruct{CoreCount: 16, Family: standardDDSv5} vmSizeStandardD32dsV5Struct = VMSizeStruct{CoreCount: 32, Family: standardDDSv5} + vmSizeStandardD48dsV5Struct = VMSizeStruct{CoreCount: 48, Family: standardDDSv5} vmSizeStandardD64dsV5Struct = 
VMSizeStruct{CoreCount: 64, Family: standardDDSv5} vmSizeStandardD96dsV5Struct = VMSizeStruct{CoreCount: 96, Family: standardDDSv5} @@ -330,6 +332,7 @@ var ( vmSizeStandardE64isV3Struct = VMSizeStruct{CoreCount: 64, Family: standardESv3} vmSizeStandardE80isV4Struct = VMSizeStruct{CoreCount: 80, Family: standardEISv4} vmSizeStandardE80idsV4Struct = VMSizeStruct{CoreCount: 80, Family: standardEIDSv4} + vmSizeStandardE96dsV5Struct = VMSizeStruct{CoreCount: 96, Family: standardEDSv5} vmSizeStandardE104isV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEISv5} vmSizeStandardE104idsV5Struct = VMSizeStruct{CoreCount: 104, Family: standardEIDSv5} @@ -420,6 +423,7 @@ const ( standardEISv4 VMFamily = "standardEISv4Family" standardEIDSv4 VMFamily = "standardEIDSv4Family" standardEISv5 VMFamily = "standardEISv5Family" + standardEDSv5 VMFamily = "standardEDSv5Family" standardEIDSv5 VMFamily = "standardEIDSv5Family" standardFSv2 VMFamily = "standardFSv2Family" standardMS VMFamily = "standardMSFamily" diff --git a/pkg/api/v20191231preview/openshiftcluster_convert.go b/pkg/api/v20191231preview/openshiftcluster_convert.go index d813b32b0fa..e5481c9c00c 100644 --- a/pkg/api/v20191231preview/openshiftcluster_convert.go +++ b/pkg/api/v20191231preview/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20191231preview import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -141,14 +142,14 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } out.Properties.NetworkProfile.PodCIDR = oc.Properties.NetworkProfile.PodCIDR out.Properties.NetworkProfile.ServiceCIDR = oc.Properties.NetworkProfile.ServiceCIDR - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID 
out.Properties.WorkerProfiles = nil if oc.Properties.WorkerProfiles != nil { out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count diff --git a/pkg/api/v20191231preview/openshiftcluster_validatestatic.go b/pkg/api/v20191231preview/openshiftcluster_validatestatic.go index 35d01cb0a0a..6e49a50ee9f 100644 --- a/pkg/api/v20191231preview/openshiftcluster_validatestatic.go +++ b/pkg/api/v20191231preview/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) 
error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID oc := _oc.(*OpenShiftCluster) @@ -254,11 +255,8 @@ func (sv openShiftClusterStaticValidator) validateNetworkProfile(path string, np } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -278,11 +276,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, 
api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20191231preview/openshiftcluster_validatestatic_test.go b/pkg/api/v20191231preview/openshiftcluster_validatestatic_test.go index 13b13561e5c..0aff1921a36 100644 --- a/pkg/api/v20191231preview/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20191231preview/openshiftcluster_validatestatic_test.go @@ -17,11 +17,11 @@ import ( ) type validateTest struct { - name string - current func(oc *OpenShiftCluster) - modify func(oc *OpenShiftCluster) - requireD2sWorkers bool - wantErr string + name string + current func(oc *OpenShiftCluster) + modify func(oc *OpenShiftCluster) + isCI bool + wantErr string } type testMode string @@ -100,10 +100,10 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { v := &openShiftClusterStaticValidator{ - location: "location", - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: id, + location: "location", + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: id, r: azure.Resource{ SubscriptionID: subscriptionID, 
ResourceGroup: "resourceGroup", @@ -132,7 +132,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(validOCForTest(), current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -621,7 +621,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = VMSizeStandardD2sV3 }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -660,22 +660,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is 
invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20200430/openshiftcluster_convert.go b/pkg/api/v20200430/openshiftcluster_convert.go index ef4cfadcc52..073863bef08 100644 --- a/pkg/api/v20200430/openshiftcluster_convert.go +++ b/pkg/api/v20200430/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20200430 import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -141,14 +142,14 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } out.Properties.NetworkProfile.PodCIDR = oc.Properties.NetworkProfile.PodCIDR out.Properties.NetworkProfile.ServiceCIDR = oc.Properties.NetworkProfile.ServiceCIDR - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.WorkerProfiles = nil if oc.Properties.WorkerProfiles != nil { out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count diff --git a/pkg/api/v20200430/openshiftcluster_validatestatic.go b/pkg/api/v20200430/openshiftcluster_validatestatic.go index 
9d811e91829..44cbb72ad44 100644 --- a/pkg/api/v20200430/openshiftcluster_validatestatic.go +++ b/pkg/api/v20200430/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID oc := _oc.(*OpenShiftCluster) @@ -257,11 +258,8 @@ func (sv openShiftClusterStaticValidator) validateNetworkProfile(path string, np } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", 
fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -281,11 +279,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git 
a/pkg/api/v20200430/openshiftcluster_validatestatic_test.go b/pkg/api/v20200430/openshiftcluster_validatestatic_test.go index fef5e3c383d..90111c9e2f2 100644 --- a/pkg/api/v20200430/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20200430/openshiftcluster_validatestatic_test.go @@ -17,11 +17,11 @@ import ( ) type validateTest struct { - name string - current func(oc *OpenShiftCluster) - modify func(oc *OpenShiftCluster) - requireD2sWorkers bool - wantErr string + name string + current func(oc *OpenShiftCluster) + modify func(oc *OpenShiftCluster) + isCI bool + wantErr string } type testMode string @@ -100,10 +100,10 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { v := &openShiftClusterStaticValidator{ - location: "location", - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: id, + location: "location", + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: id, r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -132,7 +132,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(validOCForTest(), current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -621,7 +621,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = VMSizeStandardD2sV3 }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: 
"subnetId invalid", @@ -660,22 +660,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20210901preview/openshiftcluster_convert.go b/pkg/api/v20210901preview/openshiftcluster_convert.go index ef1cbae6b16..f56b289a8b5 100644 --- a/pkg/api/v20210901preview/openshiftcluster_convert.go +++ b/pkg/api/v20210901preview/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20210901preview import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -156,7 +157,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.NetworkProfile.PodCIDR = oc.Properties.NetworkProfile.PodCIDR out.Properties.NetworkProfile.ServiceCIDR = oc.Properties.NetworkProfile.ServiceCIDR 
out.Properties.NetworkProfile.SoftwareDefinedNetwork = api.SoftwareDefinedNetwork(oc.Properties.NetworkProfile.SoftwareDefinedNetwork) - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -165,7 +166,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count diff --git a/pkg/api/v20210901preview/openshiftcluster_validatestatic.go b/pkg/api/v20210901preview/openshiftcluster_validatestatic.go index b28291bb0a3..5c4097179a6 100644 --- a/pkg/api/v20210901preview/openshiftcluster_validatestatic.go +++ b/pkg/api/v20210901preview/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - 
domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID oc := _oc.(*OpenShiftCluster) @@ -263,11 +264,8 @@ func (sv openShiftClusterStaticValidator) validateNetworkProfile(path string, np } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, 
api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -304,11 +302,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20210901preview/openshiftcluster_validatestatic_test.go b/pkg/api/v20210901preview/openshiftcluster_validatestatic_test.go index abfc7e553a4..12786742cae 100644 --- a/pkg/api/v20210901preview/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20210901preview/openshiftcluster_validatestatic_test.go @@ -18,11 +18,11 @@ import ( ) type validateTest struct { - name string - current func(oc *OpenShiftCluster) - modify func(oc *OpenShiftCluster) - 
requireD2sWorkers bool - wantErr string + name string + current func(oc *OpenShiftCluster) + modify func(oc *OpenShiftCluster) + isCI bool + wantErr string } type testMode string @@ -117,10 +117,10 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { v := &openShiftClusterStaticValidator{ - location: "location", - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: id, + location: "location", + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: id, r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -149,7 +149,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(validOCForTest(), current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -613,7 +613,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = VMSizeStandardD2sV3 }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -693,22 +693,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided 
worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20220401/openshiftcluster_convert.go b/pkg/api/v20220401/openshiftcluster_convert.go index 0ea465c1f49..1b2e545f14a 100644 --- a/pkg/api/v20220401/openshiftcluster_convert.go +++ b/pkg/api/v20220401/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20220401 import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -156,7 +157,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } out.Properties.NetworkProfile.PodCIDR = oc.Properties.NetworkProfile.PodCIDR out.Properties.NetworkProfile.ServiceCIDR = oc.Properties.NetworkProfile.ServiceCIDR - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -165,7 +166,7 @@ func (c 
openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count diff --git a/pkg/api/v20220401/openshiftcluster_validatestatic.go b/pkg/api/v20220401/openshiftcluster_validatestatic.go index 0735d6f0e40..a986f4fd1f7 100644 --- a/pkg/api/v20220401/openshiftcluster_validatestatic.go +++ b/pkg/api/v20220401/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location 
sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID oc := _oc.(*OpenShiftCluster) @@ -263,11 +264,8 @@ func (sv openShiftClusterStaticValidator) validateNetworkProfile(path string, np } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -304,11 +302,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", 
fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20220401/openshiftcluster_validatestatic_test.go b/pkg/api/v20220401/openshiftcluster_validatestatic_test.go index e037195c7f2..759c2616bb3 100644 --- a/pkg/api/v20220401/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20220401/openshiftcluster_validatestatic_test.go @@ -18,11 +18,11 @@ import ( ) type validateTest struct { - name string - current func(oc *OpenShiftCluster) - modify func(oc *OpenShiftCluster) - requireD2sWorkers bool - wantErr string + name string + current func(oc *OpenShiftCluster) + modify func(oc *OpenShiftCluster) + isCI bool + wantErr string } type testMode string @@ -117,10 +117,10 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { v := &openShiftClusterStaticValidator{ - location: "location", - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: id, + location: "location", + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: id, r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -149,7 +149,7 @@ func runTests(t *testing.T, mode 
testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(validOCForTest(), current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -652,7 +652,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -732,22 +732,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: 
properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20220904/openshiftcluster_convert.go b/pkg/api/v20220904/openshiftcluster_convert.go index 60137d5f2e3..dc7061e0763 100644 --- a/pkg/api/v20220904/openshiftcluster_convert.go +++ b/pkg/api/v20220904/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20220904 import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -156,7 +157,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } out.Properties.NetworkProfile.PodCIDR = oc.Properties.NetworkProfile.PodCIDR out.Properties.NetworkProfile.ServiceCIDR = oc.Properties.NetworkProfile.ServiceCIDR - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -165,7 +166,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = 
oc.Properties.WorkerProfiles[i].Count diff --git a/pkg/api/v20220904/openshiftcluster_validatestatic.go b/pkg/api/v20220904/openshiftcluster_validatestatic.go index beca3457aa3..9d3b5421f58 100644 --- a/pkg/api/v20220904/openshiftcluster_validatestatic.go +++ b/pkg/api/v20220904/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID oc := _oc.(*OpenShiftCluster) @@ -263,11 +264,8 @@ func (sv openShiftClusterStaticValidator) validateNetworkProfile(path string, np } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", 
mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -304,11 +302,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return 
api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20220904/openshiftcluster_validatestatic_test.go b/pkg/api/v20220904/openshiftcluster_validatestatic_test.go index 1207274bee0..48bdcace73e 100644 --- a/pkg/api/v20220904/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20220904/openshiftcluster_validatestatic_test.go @@ -19,13 +19,13 @@ import ( ) type validateTest struct { - name string - clusterName *string - location *string - current func(oc *OpenShiftCluster) - modify func(oc *OpenShiftCluster) - requireD2sWorkers bool - wantErr string + name string + clusterName *string + location *string + current func(oc *OpenShiftCluster) + modify func(oc *OpenShiftCluster) + isCI bool + wantErr string } type testMode string @@ -130,10 +130,10 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { } v := &openShiftClusterStaticValidator{ - location: *tt.location, - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: getResourceID(*tt.clusterName), + location: *tt.location, + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: getResourceID(*tt.clusterName), r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -162,7 +162,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(validOCForTest(), current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -665,7 +665,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" }, - wantErr: "400: 
InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -745,22 +745,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20230401/openshiftcluster_convert.go b/pkg/api/v20230401/openshiftcluster_convert.go index 4f3442cb10c..5226bbb51c0 100644 --- a/pkg/api/v20230401/openshiftcluster_convert.go +++ b/pkg/api/v20230401/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20230401 import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -158,7 +159,7 @@ func (c openShiftClusterConverter) 
ToInternal(_oc interface{}, out *api.OpenShif out.Properties.NetworkProfile.PodCIDR = oc.Properties.NetworkProfile.PodCIDR out.Properties.NetworkProfile.ServiceCIDR = oc.Properties.NetworkProfile.ServiceCIDR out.Properties.NetworkProfile.OutboundType = api.OutboundType(oc.Properties.NetworkProfile.OutboundType) - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -167,7 +168,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count diff --git a/pkg/api/v20230401/openshiftcluster_validatestatic.go b/pkg/api/v20230401/openshiftcluster_validatestatic.go index 5747cfab81d..e07b789aacd 100644 --- a/pkg/api/v20230401/openshiftcluster_validatestatic.go +++ b/pkg/api/v20230401/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + 
"github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID oc := _oc.(*OpenShiftCluster) @@ -273,11 +274,8 @@ func (sv openShiftClusterStaticValidator) validateNetworkProfile(path string, np } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is 
invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -314,11 +312,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20230401/openshiftcluster_validatestatic_test.go b/pkg/api/v20230401/openshiftcluster_validatestatic_test.go index 23658a483d9..6d2b6a5a082 100644 --- a/pkg/api/v20230401/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20230401/openshiftcluster_validatestatic_test.go @@ -19,13 +19,13 @@ import ( ) type validateTest struct { - name 
string - clusterName *string - location *string - current func(oc *OpenShiftCluster) - modify func(oc *OpenShiftCluster) - requireD2sWorkers bool - wantErr string + name string + clusterName *string + location *string + current func(oc *OpenShiftCluster) + modify func(oc *OpenShiftCluster) + isCI bool + wantErr string } type testMode string @@ -131,10 +131,10 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { } v := &openShiftClusterStaticValidator{ - location: *tt.location, - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: getResourceID(*tt.clusterName), + location: *tt.location, + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: getResourceID(*tt.clusterName), r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -163,7 +163,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(validOCForTest(), current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -695,7 +695,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -775,22 +775,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: 
properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20230701preview/openshiftcluster_convert.go b/pkg/api/v20230701preview/openshiftcluster_convert.go index 59ff4b9ddd7..3fe05981c68 100644 --- a/pkg/api/v20230701preview/openshiftcluster_convert.go +++ b/pkg/api/v20230701preview/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20230701preview import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -240,7 +241,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } } - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = 
oc.Properties.MasterProfile.DiskEncryptionSetID @@ -249,7 +250,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count diff --git a/pkg/api/v20230701preview/openshiftcluster_validatestatic.go b/pkg/api/v20230701preview/openshiftcluster_validatestatic.go index af52d021608..09f1eaf562e 100644 --- a/pkg/api/v20230701preview/openshiftcluster_validatestatic.go +++ b/pkg/api/v20230701preview/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, 
installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID architectureVersion := installArchitectureVersion @@ -364,11 +365,8 @@ func validateOutboundIPPrefixes(path string, outboundIPPrefixes []OutboundIPPref } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -405,11 +403,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case 
validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20230701preview/openshiftcluster_validatestatic_test.go b/pkg/api/v20230701preview/openshiftcluster_validatestatic_test.go index a53978d0e40..9ae4fc944ab 100644 --- a/pkg/api/v20230701preview/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20230701preview/openshiftcluster_validatestatic_test.go @@ -24,7 +24,7 @@ type validateTest struct { location *string current func(oc *OpenShiftCluster) modify func(oc *OpenShiftCluster) - requireD2sWorkers bool + isCI bool architectureVersion *api.ArchitectureVersion wantErr string } @@ -141,10 +141,10 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { } v := &openShiftClusterStaticValidator{ - location: *tt.location, - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: getResourceID(*tt.clusterName), + location: *tt.location, + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: getResourceID(*tt.clusterName), r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: 
"resourceGroup", @@ -175,7 +175,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(validOCForTest(), current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -932,7 +932,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -1012,22 +1012,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 
'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20230904/openshiftcluster_convert.go b/pkg/api/v20230904/openshiftcluster_convert.go index 5bfbf0bbd66..a54ece52430 100644 --- a/pkg/api/v20230904/openshiftcluster_convert.go +++ b/pkg/api/v20230904/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20230904 import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -170,7 +171,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.NetworkProfile.ServiceCIDR = oc.Properties.NetworkProfile.ServiceCIDR out.Properties.NetworkProfile.OutboundType = api.OutboundType(oc.Properties.NetworkProfile.OutboundType) out.Properties.NetworkProfile.PreconfiguredNSG = api.PreconfiguredNSG(oc.Properties.NetworkProfile.PreconfiguredNSG) - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -179,7 +180,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) 
out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count @@ -192,7 +193,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfilesStatus = make([]api.WorkerProfile, len(oc.Properties.WorkerProfilesStatus)) for i := range oc.Properties.WorkerProfilesStatus { out.Properties.WorkerProfilesStatus[i].Name = oc.Properties.WorkerProfilesStatus[i].Name - out.Properties.WorkerProfilesStatus[i].VMSize = api.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) + out.Properties.WorkerProfilesStatus[i].VMSize = vms.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) out.Properties.WorkerProfilesStatus[i].DiskSizeGB = oc.Properties.WorkerProfilesStatus[i].DiskSizeGB out.Properties.WorkerProfilesStatus[i].SubnetID = oc.Properties.WorkerProfilesStatus[i].SubnetID out.Properties.WorkerProfilesStatus[i].Count = oc.Properties.WorkerProfilesStatus[i].Count diff --git a/pkg/api/v20230904/openshiftcluster_validatestatic.go b/pkg/api/v20230904/openshiftcluster_validatestatic.go index e634191ea08..3a6a9622113 100644 --- a/pkg/api/v20230904/openshiftcluster_validatestatic.go +++ b/pkg/api/v20230904/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current 
*api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID oc := _oc.(*OpenShiftCluster) @@ -277,11 +278,8 @@ func (sv openShiftClusterStaticValidator) validateNetworkProfile(path string, np } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -318,11 +316,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return 
api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20230904/openshiftcluster_validatestatic_test.go b/pkg/api/v20230904/openshiftcluster_validatestatic_test.go index 64073b884c4..d76f3b04770 100644 --- a/pkg/api/v20230904/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20230904/openshiftcluster_validatestatic_test.go @@ -19,13 +19,13 @@ import ( ) type validateTest struct { - name string - clusterName *string - location *string - current func(oc *OpenShiftCluster) - modify func(oc *OpenShiftCluster) - requireD2sWorkers bool - wantErr string + name string + clusterName *string + location *string + current func(oc *OpenShiftCluster) + modify func(oc *OpenShiftCluster) + isCI bool + wantErr string } type testMode string @@ -131,10 +131,10 @@ func runTests(t 
*testing.T, mode testMode, tests []*validateTest) { } v := &openShiftClusterStaticValidator{ - location: *tt.location, - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: getResourceID(*tt.clusterName), + location: *tt.location, + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: getResourceID(*tt.clusterName), r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -163,7 +163,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(validOCForTest(), current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -711,7 +711,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -791,22 +791,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: 
properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20231122/openshiftcluster_convert.go b/pkg/api/v20231122/openshiftcluster_convert.go index 59a2037c251..7930e8e05b9 100644 --- a/pkg/api/v20231122/openshiftcluster_convert.go +++ b/pkg/api/v20231122/openshiftcluster_convert.go @@ -5,6 +5,7 @@ package v20231122 import ( "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -217,7 +218,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } } - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -226,7 +227,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = 
api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count @@ -239,7 +240,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfilesStatus = make([]api.WorkerProfile, len(oc.Properties.WorkerProfilesStatus)) for i := range oc.Properties.WorkerProfilesStatus { out.Properties.WorkerProfilesStatus[i].Name = oc.Properties.WorkerProfilesStatus[i].Name - out.Properties.WorkerProfilesStatus[i].VMSize = api.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) + out.Properties.WorkerProfilesStatus[i].VMSize = vms.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) out.Properties.WorkerProfilesStatus[i].DiskSizeGB = oc.Properties.WorkerProfilesStatus[i].DiskSizeGB out.Properties.WorkerProfilesStatus[i].SubnetID = oc.Properties.WorkerProfilesStatus[i].SubnetID out.Properties.WorkerProfilesStatus[i].Count = oc.Properties.WorkerProfilesStatus[i].Count diff --git a/pkg/api/v20231122/openshiftcluster_validatestatic.go b/pkg/api/v20231122/openshiftcluster_validatestatic.go index 05f2131e853..34e1e42e053 100644 --- a/pkg/api/v20231122/openshiftcluster_validatestatic.go +++ b/pkg/api/v20231122/openshiftcluster_validatestatic.go @@ -17,23 +17,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + resourceID string r 
azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID architectureVersion := installArchitectureVersion @@ -317,11 +318,8 @@ func validateManagedOutboundIPs(path string, managedOutboundIPs ManagedOutboundI } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ 
-358,11 +356,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20231122/openshiftcluster_validatestatic_test.go b/pkg/api/v20231122/openshiftcluster_validatestatic_test.go index fe35c838f65..a5d2a2a9fe6 100644 --- a/pkg/api/v20231122/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20231122/openshiftcluster_validatestatic_test.go @@ -24,7 +24,7 @@ type validateTest struct { location *string current func(oc *OpenShiftCluster) modify func(oc *OpenShiftCluster) - requireD2sWorkers bool + isCI bool architectureVersion *api.ArchitectureVersion wantErr string } @@ -149,10 +149,10 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { } 
v := &openShiftClusterStaticValidator{ - location: *tt.location, - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: getResourceID(*tt.clusterName), + location: *tt.location, + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: getResourceID(*tt.clusterName), r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -190,7 +190,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(ext, current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Error(err) @@ -842,7 +842,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -922,22 +922,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 
'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20240812preview/openshiftcluster_convert.go b/pkg/api/v20240812preview/openshiftcluster_convert.go index 3f1e501ec23..0b9d0e151fc 100644 --- a/pkg/api/v20240812preview/openshiftcluster_convert.go +++ b/pkg/api/v20240812preview/openshiftcluster_convert.go @@ -6,6 +6,7 @@ package v20240812preview import ( "github.com/Azure/ARO-RP/pkg/api" "github.com/Azure/ARO-RP/pkg/api/util/pointerutils" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -304,7 +305,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } } - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -313,7 +314,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - out.Properties.WorkerProfiles[i].VMSize = 
api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count @@ -326,7 +327,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfilesStatus = make([]api.WorkerProfile, len(oc.Properties.WorkerProfilesStatus)) for i := range oc.Properties.WorkerProfilesStatus { out.Properties.WorkerProfilesStatus[i].Name = oc.Properties.WorkerProfilesStatus[i].Name - out.Properties.WorkerProfilesStatus[i].VMSize = api.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) + out.Properties.WorkerProfilesStatus[i].VMSize = vms.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) out.Properties.WorkerProfilesStatus[i].DiskSizeGB = oc.Properties.WorkerProfilesStatus[i].DiskSizeGB out.Properties.WorkerProfilesStatus[i].SubnetID = oc.Properties.WorkerProfilesStatus[i].SubnetID out.Properties.WorkerProfilesStatus[i].Count = oc.Properties.WorkerProfilesStatus[i].Count diff --git a/pkg/api/v20240812preview/openshiftcluster_validatestatic.go b/pkg/api/v20240812preview/openshiftcluster_validatestatic.go index a25bfec00fe..cfc4e987c8f 100644 --- a/pkg/api/v20240812preview/openshiftcluster_validatestatic.go +++ b/pkg/api/v20240812preview/openshiftcluster_validatestatic.go @@ -20,23 +20,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + isCI bool + 
resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID architectureVersion := installArchitectureVersion @@ -327,11 +328,8 @@ func validateManagedOutboundIPs(path string, managedOutboundIPs ManagedOutboundI } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid.", mp.VMSize)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The provided master VM subnet '%s' is invalid.", 
mp.SubnetID)) @@ -368,11 +366,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20240812preview/openshiftcluster_validatestatic_test.go b/pkg/api/v20240812preview/openshiftcluster_validatestatic_test.go index d2c8c82e1a8..3ac0fed2c3b 100644 --- a/pkg/api/v20240812preview/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20240812preview/openshiftcluster_validatestatic_test.go @@ -24,7 +24,7 @@ type validateTest struct { location *string current func(oc *OpenShiftCluster) modify func(oc *OpenShiftCluster) - requireD2sWorkers bool + isCI bool architectureVersion *api.ArchitectureVersion wantErr string } @@ -158,10 +158,10 @@ func runTests(t 
*testing.T, mode testMode, tests []*validateTest) { } v := &openShiftClusterStaticValidator{ - location: *tt.location, - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: getResourceID(*tt.clusterName), + location: *tt.location, + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: getResourceID(*tt.clusterName), r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -199,7 +199,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(ext, current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Errorf("Expected error %s, got nil", tt.wantErr) @@ -849,7 +849,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", }, { name: "subnetId invalid", @@ -929,22 +929,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: 
InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/v20250725/openshiftcluster_convert.go b/pkg/api/v20250725/openshiftcluster_convert.go index e0772125f0b..06a65048170 100644 --- a/pkg/api/v20250725/openshiftcluster_convert.go +++ b/pkg/api/v20250725/openshiftcluster_convert.go @@ -6,6 +6,7 @@ package v20250725 import ( "github.com/Azure/ARO-RP/pkg/api" "github.com/Azure/ARO-RP/pkg/api/util/pointerutils" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) type openShiftClusterConverter struct{} @@ -300,7 +301,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif } } - out.Properties.MasterProfile.VMSize = api.VMSize(oc.Properties.MasterProfile.VMSize) + out.Properties.MasterProfile.VMSize = vms.VMSize(oc.Properties.MasterProfile.VMSize) out.Properties.MasterProfile.SubnetID = oc.Properties.MasterProfile.SubnetID out.Properties.MasterProfile.EncryptionAtHost = api.EncryptionAtHost(oc.Properties.MasterProfile.EncryptionAtHost) out.Properties.MasterProfile.DiskEncryptionSetID = oc.Properties.MasterProfile.DiskEncryptionSetID @@ -309,7 +310,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfiles = make([]api.WorkerProfile, len(oc.Properties.WorkerProfiles)) for i := range oc.Properties.WorkerProfiles { out.Properties.WorkerProfiles[i].Name = oc.Properties.WorkerProfiles[i].Name - 
out.Properties.WorkerProfiles[i].VMSize = api.VMSize(oc.Properties.WorkerProfiles[i].VMSize) + out.Properties.WorkerProfiles[i].VMSize = vms.VMSize(oc.Properties.WorkerProfiles[i].VMSize) out.Properties.WorkerProfiles[i].DiskSizeGB = oc.Properties.WorkerProfiles[i].DiskSizeGB out.Properties.WorkerProfiles[i].SubnetID = oc.Properties.WorkerProfiles[i].SubnetID out.Properties.WorkerProfiles[i].Count = oc.Properties.WorkerProfiles[i].Count @@ -322,7 +323,7 @@ func (c openShiftClusterConverter) ToInternal(_oc interface{}, out *api.OpenShif out.Properties.WorkerProfilesStatus = make([]api.WorkerProfile, len(oc.Properties.WorkerProfilesStatus)) for i := range oc.Properties.WorkerProfilesStatus { out.Properties.WorkerProfilesStatus[i].Name = oc.Properties.WorkerProfilesStatus[i].Name - out.Properties.WorkerProfilesStatus[i].VMSize = api.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) + out.Properties.WorkerProfilesStatus[i].VMSize = vms.VMSize(oc.Properties.WorkerProfilesStatus[i].VMSize) out.Properties.WorkerProfilesStatus[i].DiskSizeGB = oc.Properties.WorkerProfilesStatus[i].DiskSizeGB out.Properties.WorkerProfilesStatus[i].SubnetID = oc.Properties.WorkerProfilesStatus[i].SubnetID out.Properties.WorkerProfilesStatus[i].Count = oc.Properties.WorkerProfilesStatus[i].Count diff --git a/pkg/api/v20250725/openshiftcluster_validatestatic.go b/pkg/api/v20250725/openshiftcluster_validatestatic.go index a40774f4b1c..b25d63f5a04 100644 --- a/pkg/api/v20250725/openshiftcluster_validatestatic.go +++ b/pkg/api/v20250725/openshiftcluster_validatestatic.go @@ -20,23 +20,24 @@ import ( "github.com/Azure/ARO-RP/pkg/api/util/pullsecret" apisubnet "github.com/Azure/ARO-RP/pkg/api/util/subnet" "github.com/Azure/ARO-RP/pkg/api/util/uuid" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" ) type openShiftClusterStaticValidator struct { - location string - domain string - requireD2sWorkers bool - resourceID string + location string + domain string + 
isCI bool + resourceID string r azure.Resource } // Validate validates an OpenShift cluster -func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, location, domain string, requireD2sWorkers bool, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { +func (sv openShiftClusterStaticValidator) Static(_oc interface{}, _current *api.OpenShiftCluster, isCI bool, location, domain string, installArchitectureVersion api.ArchitectureVersion, resourceID string) error { sv.location = location sv.domain = domain - sv.requireD2sWorkers = requireD2sWorkers + sv.isCI = isCI sv.resourceID = resourceID architectureVersion := installArchitectureVersion @@ -327,11 +328,8 @@ func validateManagedOutboundIPs(path string, managedOutboundIPs ManagedOutboundI } func (sv openShiftClusterStaticValidator) validateMasterProfile(path string, mp *MasterProfile, version string) error { - switch validate.VMSizeIsValidForVersion(api.VMSize(mp.VMSize), sv.requireD2sWorkers, true, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'master' role.", mp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for the chosen OpenShift version.", mp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(mp.VMSize), true, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided master VM size '%s' is invalid for version '%s'.", mp.VMSize, version)) } if !validate.RxSubnetID.MatchString(mp.SubnetID) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".subnetId", fmt.Sprintf("The 
provided master VM subnet '%s' is invalid.", mp.SubnetID)) @@ -368,11 +366,8 @@ func (sv openShiftClusterStaticValidator) validateWorkerProfile(path string, wp if wp.Name != "worker" { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".name", fmt.Sprintf("The provided worker name '%s' is invalid.", wp.Name)) } - switch validate.VMSizeIsValidForVersion(api.VMSize(wp.VMSize), sv.requireD2sWorkers, false, version) { - case validate.VMValidityNotSupportedForRole: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided VM size '%s' is invalid for the 'worker' role.", wp.VMSize)) - case validate.VMValidityNotSupportedInVersion: - return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid for the chosen OpenShift version.", wp.VMSize)) + if !validate.VMSizeIsValidForVersion(vms.VMSize(wp.VMSize), false, version, sv.isCI) { + return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".vmSize", fmt.Sprintf("The provided worker VM size '%s' is invalid.", wp.VMSize)) } if !validate.DiskSizeIsValid(wp.DiskSizeGB) { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, path+".diskSizeGB", fmt.Sprintf("The provided worker disk size '%d' is invalid.", wp.DiskSizeGB)) diff --git a/pkg/api/v20250725/openshiftcluster_validatestatic_test.go b/pkg/api/v20250725/openshiftcluster_validatestatic_test.go index 1ca729dd899..8a6fbe5ab4d 100644 --- a/pkg/api/v20250725/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20250725/openshiftcluster_validatestatic_test.go @@ -22,10 +22,9 @@ type validateTest struct { name string clusterName *string location *string - clusterVersion *string current func(oc *OpenShiftCluster) modify func(oc *OpenShiftCluster) - requireD2sWorkers bool + isCI bool architectureVersion 
*api.ArchitectureVersion wantErr string } @@ -73,7 +72,7 @@ func validSystemData() *SystemData { } } -func validOpenShiftCluster(name, location, version string) *OpenShiftCluster { +func validOpenShiftCluster(name, location string) *OpenShiftCluster { oc := &OpenShiftCluster{ ID: getResourceID(name), Name: name, @@ -87,7 +86,7 @@ func validOpenShiftCluster(name, location, version string) *OpenShiftCluster { ClusterProfile: ClusterProfile{ PullSecret: `{"auths":{"registry.connect.redhat.com":{"auth":""},"registry.redhat.io":{"auth":""}}}`, Domain: "cluster.location.aroapp.io", - Version: version, + Version: "4.10.0", ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", subscriptionID), FipsValidatedModules: FipsValidatedModulesDisabled, }, @@ -158,15 +157,11 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { tt.clusterName = pointerutils.ToPtr("resourceName") } - if tt.clusterVersion == nil { - tt.clusterVersion = pointerutils.ToPtr("4.10.0") - } - v := &openShiftClusterStaticValidator{ - location: *tt.location, - domain: "location.aroapp.io", - requireD2sWorkers: tt.requireD2sWorkers, - resourceID: getResourceID(*tt.clusterName), + location: *tt.location, + domain: "location.aroapp.io", + isCI: tt.isCI, + resourceID: getResourceID(*tt.clusterName), r: azure.Resource{ SubscriptionID: subscriptionID, ResourceGroup: "resourceGroup", @@ -177,7 +172,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { } validOCForTest := func() *OpenShiftCluster { - oc := validOpenShiftCluster(*tt.clusterName, *tt.location, *tt.clusterVersion) + oc := validOpenShiftCluster(*tt.clusterName, *tt.location) if tt.current != nil { tt.current(oc) } @@ -204,7 +199,7 @@ func runTests(t *testing.T, mode testMode, tests []*validateTest) { (&openShiftClusterConverter{}).ToInternal(ext, current) } - err := v.Static(oc, current, v.location, v.domain, tt.requireD2sWorkers, api.ArchitectureVersionV2, v.resourceID) + err := v.Static(oc, 
current, tt.isCI, v.location, v.domain, api.ArchitectureVersionV2, v.resourceID) if err == nil { if tt.wantErr != "" { t.Errorf("Expected error %s, got nil", tt.wantErr) @@ -854,7 +849,7 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'master' role.", + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid for version '4.10.0'.", }, { name: "subnetId invalid", @@ -910,21 +905,6 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = desID }, }, - { - name: "vmSize invalid for version", - clusterVersion: pointerutils.ToPtr("4.10.0"), - modify: func(oc *OpenShiftCluster) { - oc.Properties.MasterProfile.VMSize = "Standard_D8s_v6" - }, - wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D8s_v6' is invalid for the chosen OpenShift version.", - }, - { - name: "vmSize valid for version", - clusterVersion: pointerutils.ToPtr("4.20.0"), - modify: func(oc *OpenShiftCluster) { - oc.Properties.MasterProfile.VMSize = "Standard_D8s_v6" - }, - }, } runTests(t, testModeCreate, createTests) @@ -949,37 +929,14 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "invalid" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'invalid' is invalid for the 'worker' role.", - }, - { - name: "vmSize invalid for version", - clusterVersion: pointerutils.ToPtr("4.10.0"), - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D8s_v6" - }, - wantErr: "400: InvalidParameter: 
properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D8s_v6' is invalid for the chosen OpenShift version.", - }, - { - name: "vmSize valid for version", - clusterVersion: pointerutils.ToPtr("4.20.0"), - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D8s_v6" - }, + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'invalid' is invalid.", }, { name: "vmSize too small (prod)", modify: func(oc *OpenShiftCluster) { oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" }, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D2s_v3' is invalid for the 'worker' role.", - }, - { - name: "vmSize too big (dev)", - modify: func(oc *OpenShiftCluster) { - oc.Properties.WorkerProfiles[0].VMSize = "Standard_D4s_v3" - }, - requireD2sWorkers: true, - wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided VM size 'Standard_D4s_v3' is invalid for the 'worker' role.", + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v3' is invalid.", }, { name: "disk too small", diff --git a/pkg/api/validate/vm.go b/pkg/api/validate/vm.go index 76f7733bc19..9bdcfc32f2c 100644 --- a/pkg/api/validate/vm.go +++ b/pkg/api/validate/vm.go @@ -4,8 +4,8 @@ package validate // Licensed under the Apache License 2.0. 
import ( - "github.com/Azure/ARO-RP/pkg/api" "github.com/Azure/ARO-RP/pkg/api/util/version" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) // Public facing document which lists supported VM Sizes: @@ -13,438 +13,56 @@ import ( // To add new instance types, needs Project Management's involvement and instructions are below., // https://github.com/Azure/ARO-RP/blob/master/docs/adding-new-instance-types.md - -const ( - VMRoleMaster string = "master" - VMRoleWorker string = "worker" -) - -var supportedVMSizesByRoleMap = map[string]map[api.VMSize]api.VMSizeStruct{ - VMRoleMaster: supportedMasterVmSizes, - VMRoleWorker: supportedWorkerVmSizes, -} - -func SupportedVMSizesByRole(vmRole string) map[api.VMSize]api.VMSizeStruct { - supportedvmsizes, exists := supportedVMSizesByRoleMap[vmRole] - if !exists { - return nil - } - return supportedvmsizes -} - -type VMValidity int - -const ( - VMValidityOK VMValidity = iota - VMValidityNotSupportedForRole - VMValidityNotSupportedInVersion -) - -var ver419 = version.NewVersion(4, 19, 0) - -var masterVmSizesWithMinimumVersion = map[api.VMSize]version.Version{ - api.VMSizeStandardD4sV6: ver419, - api.VMSizeStandardD8sV6: ver419, - api.VMSizeStandardD16sV6: ver419, - api.VMSizeStandardD32sV6: ver419, - api.VMSizeStandardD48sV6: ver419, - api.VMSizeStandardD64sV6: ver419, - api.VMSizeStandardD96sV6: ver419, - - api.VMSizeStandardD4dsV6: ver419, - api.VMSizeStandardD8dsV6: ver419, - api.VMSizeStandardD16dsV6: ver419, - api.VMSizeStandardD32dsV6: ver419, - api.VMSizeStandardD48dsV6: ver419, - api.VMSizeStandardD64dsV6: ver419, - api.VMSizeStandardD96dsV6: ver419, -} - -var workerVmSizesWithMinimumVersion = map[api.VMSize]version.Version{ - api.VMSizeStandardD4sV6: ver419, - api.VMSizeStandardD8sV6: ver419, - api.VMSizeStandardD16sV6: ver419, - api.VMSizeStandardD32sV6: ver419, - api.VMSizeStandardD48sV6: ver419, - api.VMSizeStandardD64sV6: ver419, - api.VMSizeStandardD96sV6: ver419, - - api.VMSizeStandardD4dsV6: ver419, - 
api.VMSizeStandardD8dsV6: ver419, - api.VMSizeStandardD16dsV6: ver419, - api.VMSizeStandardD32dsV6: ver419, - api.VMSizeStandardD48dsV6: ver419, - api.VMSizeStandardD64dsV6: ver419, - api.VMSizeStandardD96dsV6: ver419, - - api.VMSizeStandardD4lsV6: ver419, - api.VMSizeStandardD8lsV6: ver419, - api.VMSizeStandardD16lsV6: ver419, - api.VMSizeStandardD32lsV6: ver419, - api.VMSizeStandardD48lsV6: ver419, - api.VMSizeStandardD64lsV6: ver419, - api.VMSizeStandardD96lsV6: ver419, - - api.VMSizeStandardD4ldsV6: ver419, - api.VMSizeStandardD8ldsV6: ver419, - api.VMSizeStandardD16ldsV6: ver419, - api.VMSizeStandardD32ldsV6: ver419, - api.VMSizeStandardD48ldsV6: ver419, - api.VMSizeStandardD64ldsV6: ver419, - api.VMSizeStandardD96ldsV6: ver419, - - api.VMSizeStandardL4sV4: ver419, - api.VMSizeStandardL8sV4: ver419, - api.VMSizeStandardL16sV4: ver419, - api.VMSizeStandardL32sV4: ver419, - api.VMSizeStandardL48sV4: ver419, - api.VMSizeStandardL64sV4: ver419, - api.VMSizeStandardL80sV4: ver419, -} - -var supportedMasterVmSizes = map[api.VMSize]api.VMSizeStruct{ - // General purpose - api.VMSizeStandardD8sV3: api.VMSizeStandardD8sV3Struct, - api.VMSizeStandardD16sV3: api.VMSizeStandardD16sV3Struct, - api.VMSizeStandardD32sV3: api.VMSizeStandardD32sV3Struct, - - api.VMSizeStandardD8sV4: api.VMSizeStandardD8sV4Struct, - api.VMSizeStandardD16sV4: api.VMSizeStandardD16sV4Struct, - api.VMSizeStandardD32sV4: api.VMSizeStandardD32sV4Struct, - - api.VMSizeStandardD8sV5: api.VMSizeStandardD8sV5Struct, - api.VMSizeStandardD16sV5: api.VMSizeStandardD16sV5Struct, - api.VMSizeStandardD32sV5: api.VMSizeStandardD32sV5Struct, - - api.VMSizeStandardD8asV4: api.VMSizeStandardD8asV4Struct, - api.VMSizeStandardD16asV4: api.VMSizeStandardD16asV4Struct, - api.VMSizeStandardD32asV4: api.VMSizeStandardD32asV4Struct, - - api.VMSizeStandardD8asV5: api.VMSizeStandardD8asV5Struct, - api.VMSizeStandardD16asV5: api.VMSizeStandardD16asV5Struct, - api.VMSizeStandardD32asV5: api.VMSizeStandardD32asV5Struct, - - 
api.VMSizeStandardD8dsV5: api.VMSizeStandardD8dsV5Struct, - api.VMSizeStandardD16dsV5: api.VMSizeStandardD16dsV5Struct, - api.VMSizeStandardD32dsV5: api.VMSizeStandardD32dsV5Struct, - - // Memory optimized - api.VMSizeStandardE8sV3: api.VMSizeStandardE8sV3Struct, - api.VMSizeStandardE16sV3: api.VMSizeStandardE16sV3Struct, - api.VMSizeStandardE32sV3: api.VMSizeStandardE32sV3Struct, - - api.VMSizeStandardE8sV4: api.VMSizeStandardE8sV4Struct, - api.VMSizeStandardE16sV4: api.VMSizeStandardE16sV4Struct, - api.VMSizeStandardE20sV4: api.VMSizeStandardE20sV4Struct, - api.VMSizeStandardE32sV4: api.VMSizeStandardE32sV4Struct, - api.VMSizeStandardE48sV4: api.VMSizeStandardE48sV4Struct, - api.VMSizeStandardE64sV4: api.VMSizeStandardE64sV4Struct, - - api.VMSizeStandardE8sV5: api.VMSizeStandardE8sV5Struct, - api.VMSizeStandardE16sV5: api.VMSizeStandardE16sV5Struct, - api.VMSizeStandardE20sV5: api.VMSizeStandardE20sV5Struct, - api.VMSizeStandardE32sV5: api.VMSizeStandardE32sV5Struct, - api.VMSizeStandardE48sV5: api.VMSizeStandardE48sV5Struct, - api.VMSizeStandardE64sV5: api.VMSizeStandardE64sV5Struct, - api.VMSizeStandardE96sV5: api.VMSizeStandardE96sV5Struct, - - api.VMSizeStandardE4asV4: api.VMSizeStandardE4asV4Struct, - api.VMSizeStandardE8asV4: api.VMSizeStandardE8asV4Struct, - api.VMSizeStandardE16asV4: api.VMSizeStandardE16asV4Struct, - api.VMSizeStandardE20asV4: api.VMSizeStandardE20asV4Struct, - api.VMSizeStandardE32asV4: api.VMSizeStandardE32asV4Struct, - api.VMSizeStandardE48asV4: api.VMSizeStandardE48asV4Struct, - api.VMSizeStandardE64asV4: api.VMSizeStandardE64asV4Struct, - api.VMSizeStandardE96asV4: api.VMSizeStandardE96asV4Struct, - - api.VMSizeStandardE8asV5: api.VMSizeStandardE8asV5Struct, - api.VMSizeStandardE16asV5: api.VMSizeStandardE16asV5Struct, - api.VMSizeStandardE20asV5: api.VMSizeStandardE20asV5Struct, - api.VMSizeStandardE32asV5: api.VMSizeStandardE32asV5Struct, - api.VMSizeStandardE48asV5: api.VMSizeStandardE48asV5Struct, - api.VMSizeStandardE64asV5: 
api.VMSizeStandardE64asV5Struct, - api.VMSizeStandardE96asV5: api.VMSizeStandardE96asV5Struct, - - api.VMSizeStandardE64isV3: api.VMSizeStandardE64isV3Struct, - api.VMSizeStandardE80isV4: api.VMSizeStandardE80isV4Struct, - api.VMSizeStandardE80idsV4: api.VMSizeStandardE80idsV4Struct, - api.VMSizeStandardE104isV5: api.VMSizeStandardE104isV5Struct, - api.VMSizeStandardE104idsV5: api.VMSizeStandardE104idsV5Struct, - - // Compute optimized - api.VMSizeStandardF72sV2: api.VMSizeStandardF72sV2Struct, - - // Memory and compute optimized - api.VMSizeStandardM128ms: api.VMSizeStandardM128msStruct, - - api.VMSizeStandardD4sV6: api.VMSizeStandardD4sV6Struct, - api.VMSizeStandardD8sV6: api.VMSizeStandardD8sV6Struct, - api.VMSizeStandardD16sV6: api.VMSizeStandardD16sV6Struct, - api.VMSizeStandardD32sV6: api.VMSizeStandardD32sV6Struct, - api.VMSizeStandardD48sV6: api.VMSizeStandardD48sV6Struct, - api.VMSizeStandardD64sV6: api.VMSizeStandardD64sV6Struct, - api.VMSizeStandardD96sV6: api.VMSizeStandardD96sV6Struct, - - api.VMSizeStandardD4dsV6: api.VMSizeStandardD4dsV6Struct, - api.VMSizeStandardD8dsV6: api.VMSizeStandardD8dsV6Struct, - api.VMSizeStandardD16dsV6: api.VMSizeStandardD16dsV6Struct, - api.VMSizeStandardD32dsV6: api.VMSizeStandardD32dsV6Struct, - api.VMSizeStandardD48dsV6: api.VMSizeStandardD48dsV6Struct, - api.VMSizeStandardD64dsV6: api.VMSizeStandardD64dsV6Struct, - api.VMSizeStandardD96dsV6: api.VMSizeStandardD96dsV6Struct, -} - -// Document support -var supportedWorkerVmSizes = map[api.VMSize]api.VMSizeStruct{ - // General purpose - api.VMSizeStandardD4sV3: api.VMSizeStandardD4sV3Struct, - api.VMSizeStandardD8sV3: api.VMSizeStandardD8sV3Struct, - api.VMSizeStandardD16sV3: api.VMSizeStandardD16sV3Struct, - api.VMSizeStandardD32sV3: api.VMSizeStandardD32sV3Struct, - - api.VMSizeStandardD4sV4: api.VMSizeStandardD4sV4Struct, - api.VMSizeStandardD8sV4: api.VMSizeStandardD8sV4Struct, - api.VMSizeStandardD16sV4: api.VMSizeStandardD16sV4Struct, - api.VMSizeStandardD32sV4: 
api.VMSizeStandardD32sV4Struct, - api.VMSizeStandardD64sV4: api.VMSizeStandardD64sV4Struct, - - api.VMSizeStandardD4sV5: api.VMSizeStandardD4sV5Struct, - api.VMSizeStandardD8sV5: api.VMSizeStandardD8sV5Struct, - api.VMSizeStandardD16sV5: api.VMSizeStandardD16sV5Struct, - api.VMSizeStandardD32sV5: api.VMSizeStandardD32sV5Struct, - api.VMSizeStandardD64sV5: api.VMSizeStandardD64sV5Struct, - api.VMSizeStandardD96sV5: api.VMSizeStandardD96sV5Struct, - - api.VMSizeStandardD4asV4: api.VMSizeStandardD4asV4Struct, - api.VMSizeStandardD8asV4: api.VMSizeStandardD8asV4Struct, - api.VMSizeStandardD16asV4: api.VMSizeStandardD16asV4Struct, - api.VMSizeStandardD32asV4: api.VMSizeStandardD32asV4Struct, - api.VMSizeStandardD64asV4: api.VMSizeStandardD64asV4Struct, - api.VMSizeStandardD96asV4: api.VMSizeStandardD96asV4Struct, - - api.VMSizeStandardD4asV5: api.VMSizeStandardD4asV5Struct, - api.VMSizeStandardD8asV5: api.VMSizeStandardD8asV5Struct, - api.VMSizeStandardD16asV5: api.VMSizeStandardD16asV5Struct, - api.VMSizeStandardD32asV5: api.VMSizeStandardD32asV5Struct, - api.VMSizeStandardD64asV5: api.VMSizeStandardD64asV5Struct, - api.VMSizeStandardD96asV5: api.VMSizeStandardD96asV5Struct, - - api.VMSizeStandardD4dsV5: api.VMSizeStandardD4dsV5Struct, - api.VMSizeStandardD8dsV5: api.VMSizeStandardD8dsV5Struct, - api.VMSizeStandardD16dsV5: api.VMSizeStandardD16dsV5Struct, - api.VMSizeStandardD32dsV5: api.VMSizeStandardD32dsV5Struct, - api.VMSizeStandardD48dsV5: api.VMSizeStandardD48dsV5Struct, - api.VMSizeStandardD64dsV5: api.VMSizeStandardD64dsV5Struct, - api.VMSizeStandardD96dsV5: api.VMSizeStandardD96dsV5Struct, - - // Memory optimized - api.VMSizeStandardE4sV3: api.VMSizeStandardE4sV3Struct, - api.VMSizeStandardE8sV3: api.VMSizeStandardE8sV3Struct, - api.VMSizeStandardE16sV3: api.VMSizeStandardE16sV3Struct, - api.VMSizeStandardE32sV3: api.VMSizeStandardE32sV3Struct, - - api.VMSizeStandardE2sV4: api.VMSizeStandardE2sV4Struct, - api.VMSizeStandardE4sV4: api.VMSizeStandardE4sV4Struct, 
- api.VMSizeStandardE8sV4: api.VMSizeStandardE8sV4Struct, - api.VMSizeStandardE16sV4: api.VMSizeStandardE16sV4Struct, - api.VMSizeStandardE20sV4: api.VMSizeStandardE20sV4Struct, - api.VMSizeStandardE32sV4: api.VMSizeStandardE32sV4Struct, - api.VMSizeStandardE48sV4: api.VMSizeStandardE48sV4Struct, - api.VMSizeStandardE64sV4: api.VMSizeStandardE64sV4Struct, - - api.VMSizeStandardE2sV5: api.VMSizeStandardE2sV5Struct, - api.VMSizeStandardE4sV5: api.VMSizeStandardE4sV5Struct, - api.VMSizeStandardE8sV5: api.VMSizeStandardE8sV5Struct, - api.VMSizeStandardE16sV5: api.VMSizeStandardE16sV5Struct, - api.VMSizeStandardE20sV5: api.VMSizeStandardE20sV5Struct, - api.VMSizeStandardE32sV5: api.VMSizeStandardE32sV5Struct, - api.VMSizeStandardE48sV5: api.VMSizeStandardE48sV5Struct, - api.VMSizeStandardE64sV5: api.VMSizeStandardE64sV5Struct, - api.VMSizeStandardE96sV5: api.VMSizeStandardE96sV5Struct, - - api.VMSizeStandardE4asV4: api.VMSizeStandardE4asV4Struct, - api.VMSizeStandardE8asV4: api.VMSizeStandardE8asV4Struct, - api.VMSizeStandardE16asV4: api.VMSizeStandardE16asV4Struct, - api.VMSizeStandardE20asV4: api.VMSizeStandardE20asV4Struct, - api.VMSizeStandardE32asV4: api.VMSizeStandardE32asV4Struct, - api.VMSizeStandardE48asV4: api.VMSizeStandardE48asV4Struct, - api.VMSizeStandardE64asV4: api.VMSizeStandardE64asV4Struct, - api.VMSizeStandardE96asV4: api.VMSizeStandardE96asV4Struct, - - api.VMSizeStandardE8asV5: api.VMSizeStandardE8asV5Struct, - api.VMSizeStandardE16asV5: api.VMSizeStandardE16asV5Struct, - api.VMSizeStandardE20asV5: api.VMSizeStandardE20asV5Struct, - api.VMSizeStandardE32asV5: api.VMSizeStandardE32asV5Struct, - api.VMSizeStandardE48asV5: api.VMSizeStandardE48asV5Struct, - api.VMSizeStandardE64asV5: api.VMSizeStandardE64asV5Struct, - api.VMSizeStandardE96asV5: api.VMSizeStandardE96asV5Struct, - - api.VMSizeStandardE64isV3: api.VMSizeStandardE64isV3Struct, - api.VMSizeStandardE80isV4: api.VMSizeStandardE80isV4Struct, - api.VMSizeStandardE80idsV4: 
api.VMSizeStandardE80idsV4Struct, - api.VMSizeStandardE96dsV5: api.VMSizeStandardE96dsV5Struct, - api.VMSizeStandardE104isV5: api.VMSizeStandardE104isV5Struct, - api.VMSizeStandardE104idsV5: api.VMSizeStandardE104idsV5Struct, - - // Compute optimized - api.VMSizeStandardF4sV2: api.VMSizeStandardF4sV2Struct, - api.VMSizeStandardF8sV2: api.VMSizeStandardF8sV2Struct, - api.VMSizeStandardF16sV2: api.VMSizeStandardF16sV2Struct, - api.VMSizeStandardF32sV2: api.VMSizeStandardF32sV2Struct, - api.VMSizeStandardF72sV2: api.VMSizeStandardF72sV2Struct, - - // Memory and compute optimized - api.VMSizeStandardM128ms: api.VMSizeStandardM128msStruct, - - // Storage optimized - api.VMSizeStandardL4s: api.VMSizeStandardL4sStruct, - api.VMSizeStandardL8s: api.VMSizeStandardL8sStruct, - api.VMSizeStandardL16s: api.VMSizeStandardL16sStruct, - api.VMSizeStandardL32s: api.VMSizeStandardL32sStruct, - - api.VMSizeStandardL8sV2: api.VMSizeStandardL8sV2Struct, - api.VMSizeStandardL16sV2: api.VMSizeStandardL16sV2Struct, - api.VMSizeStandardL32sV2: api.VMSizeStandardL32sV2Struct, - api.VMSizeStandardL48sV2: api.VMSizeStandardL48sV2Struct, - api.VMSizeStandardL64sV2: api.VMSizeStandardL64sV2Struct, - - api.VMSizeStandardL8sV3: api.VMSizeStandardL8sV3Struct, - api.VMSizeStandardL16sV3: api.VMSizeStandardL16sV3Struct, - api.VMSizeStandardL32sV3: api.VMSizeStandardL32sV3Struct, - api.VMSizeStandardL48sV3: api.VMSizeStandardL48sV3Struct, - api.VMSizeStandardL64sV3: api.VMSizeStandardL64sV3Struct, - - api.VMSizeStandardL4sV4: api.VMSizeStandardL4sV4Struct, - api.VMSizeStandardL8sV4: api.VMSizeStandardL8sV4Struct, - api.VMSizeStandardL16sV4: api.VMSizeStandardL16sV4Struct, - api.VMSizeStandardL32sV4: api.VMSizeStandardL32sV4Struct, - api.VMSizeStandardL48sV4: api.VMSizeStandardL48sV4Struct, - api.VMSizeStandardL64sV4: api.VMSizeStandardL64sV4Struct, - api.VMSizeStandardL80sV4: api.VMSizeStandardL80sV4Struct, - - // GPU nodes - // the formatting of the ncasv3_t4 family is different. 
This can be seen through a - // az vm list-usage -l eastus - api.VMSizeStandardNC4asT4V3: api.VMSizeStandardNC4asT4V3Struct, - api.VMSizeStandardNC8asT4V3: api.VMSizeStandardNC8asT4V3Struct, - api.VMSizeStandardNC16asT4V3: api.VMSizeStandardNC16asT4V3Struct, - api.VMSizeStandardNC64asT4V3: api.VMSizeStandardNC64asT4V3Struct, - - api.VMSizeStandardNC6sV3: api.VMSizeStandardNC6sV3Struct, - api.VMSizeStandardNC12sV3: api.VMSizeStandardNC12sV3Struct, - api.VMSizeStandardNC24sV3: api.VMSizeStandardNC24sV3Struct, - api.VMSizeStandardNC24rsV3: api.VMSizeStandardNC24rsV3Struct, - - api.VMSizeStandardD4sV6: api.VMSizeStandardD4sV6Struct, - api.VMSizeStandardD8sV6: api.VMSizeStandardD8sV6Struct, - api.VMSizeStandardD16sV6: api.VMSizeStandardD16sV6Struct, - api.VMSizeStandardD32sV6: api.VMSizeStandardD32sV6Struct, - api.VMSizeStandardD48sV6: api.VMSizeStandardD48sV6Struct, - api.VMSizeStandardD64sV6: api.VMSizeStandardD64sV6Struct, - api.VMSizeStandardD96sV6: api.VMSizeStandardD96sV6Struct, - - api.VMSizeStandardD4dsV6: api.VMSizeStandardD4dsV6Struct, - api.VMSizeStandardD8dsV6: api.VMSizeStandardD8dsV6Struct, - api.VMSizeStandardD16dsV6: api.VMSizeStandardD16dsV6Struct, - api.VMSizeStandardD32dsV6: api.VMSizeStandardD32dsV6Struct, - api.VMSizeStandardD48dsV6: api.VMSizeStandardD48dsV6Struct, - api.VMSizeStandardD64dsV6: api.VMSizeStandardD64dsV6Struct, - api.VMSizeStandardD96dsV6: api.VMSizeStandardD96dsV6Struct, - - api.VMSizeStandardD4lsV6: api.VMSizeStandardD4lsV6Struct, - api.VMSizeStandardD8lsV6: api.VMSizeStandardD8lsV6Struct, - api.VMSizeStandardD16lsV6: api.VMSizeStandardD16lsV6Struct, - api.VMSizeStandardD32lsV6: api.VMSizeStandardD32lsV6Struct, - api.VMSizeStandardD48lsV6: api.VMSizeStandardD48lsV6Struct, - api.VMSizeStandardD64lsV6: api.VMSizeStandardD64lsV6Struct, - api.VMSizeStandardD96lsV6: api.VMSizeStandardD96lsV6Struct, - - api.VMSizeStandardD4ldsV6: api.VMSizeStandardD4ldsV6Struct, - api.VMSizeStandardD8ldsV6: api.VMSizeStandardD8ldsV6Struct, - 
api.VMSizeStandardD16ldsV6: api.VMSizeStandardD16ldsV6Struct, - api.VMSizeStandardD32ldsV6: api.VMSizeStandardD32ldsV6Struct, - api.VMSizeStandardD48ldsV6: api.VMSizeStandardD48ldsV6Struct, - api.VMSizeStandardD64ldsV6: api.VMSizeStandardD64ldsV6Struct, - api.VMSizeStandardD96ldsV6: api.VMSizeStandardD96ldsV6Struct, -} - func DiskSizeIsValid(sizeGB int) bool { return sizeGB >= 128 } -func VMSizeIsValid(vmSize api.VMSize, requireD2sWorkers, isMaster bool) bool { - if isMaster { - _, supportedAsMaster := SupportedVMSizesByRole(VMRoleMaster)[vmSize] - return supportedAsMaster +func getSupportedVMSizesByRole(isCI bool) map[vms.VMRole]map[vms.VMSize]vms.VMSizeStruct { + if isCI { + return vms.SupportedVMSizesByRoleForTesting } + return vms.SupportedVMSizesByRole +} - if requireD2sWorkers { - switch vmSize { - case api.VMSizeStandardD2sV3, api.VMSizeStandardD2sV4, api.VMSizeStandardD2sV5: - return true - default: - return false - } +func VMSizeIsValid(vmSize vms.VMSize, isMaster bool, isCI bool) bool { + role := vms.VMRoleWorker + if isMaster { + role = vms.VMRoleMaster } - _, supportedAsWorker := SupportedVMSizesByRole(VMRoleWorker)[vmSize] - return supportedAsWorker + supportedSizes := getSupportedVMSizesByRole(isCI) + _, supported := supportedSizes[role][vmSize] + return supported } // VMSizeIsValidForVersion validates VM size with version-specific restrictions -func VMSizeIsValidForVersion(vmSize api.VMSize, requireD2sWorkers, isMaster bool, v string) VMValidity { +func VMSizeIsValidForVersion(vmSize vms.VMSize, isMaster bool, v string, isCI bool) bool { // First check basic validity - if !VMSizeIsValid(vmSize, requireD2sWorkers, isMaster) { - return VMValidityNotSupportedForRole + if !VMSizeIsValid(vmSize, isMaster, isCI) { + return false } - // If we can't parse the version, just trust the above VMSizeIsValid. 
The - // only reason that the version would not be parseable is because it came - // from the cluster during enrichment, and is therefore potentially empty or - // nonsense -- this will always be pre-checked by this point during - // installs. - clusterVersion, err := version.ParseVersion(v) - if err != nil { - return VMValidityOK - } - // Check version-specific restrictions + role := vms.VMRoleWorker if isMaster { - if minVersion, exists := masterVmSizesWithMinimumVersion[vmSize]; exists { - if clusterVersion.Lt(minVersion) { - return VMValidityNotSupportedInVersion - } - } - } else { - if minVersion, exists := workerVmSizesWithMinimumVersion[vmSize]; exists { - if clusterVersion.Lt(minVersion) { - return VMValidityNotSupportedInVersion - } - } + role = vms.VMRoleMaster } - // VM size has no version restrictions or passed all checks - return VMValidityOK -} - -func VMSizeFromName(vmSize api.VMSize) (api.VMSizeStruct, bool) { - // the D2s versions are for development purposes only and don't show up in - // SupportedVMSizesByRole - switch vmSize { - case api.VMSizeStandardD2sV3: - return api.VMSizeStandardD2sV3Struct, true - case api.VMSizeStandardD2sV4: - return api.VMSizeStandardD2sV4Struct, true - case api.VMSizeStandardD2sV5: - return api.VMSizeStandardD2sV5Struct, true - } + supportedSizes := getSupportedVMSizesByRole(isCI) + sizeInfo := supportedSizes[role][vmSize] - if size, ok := SupportedVMSizesByRole(VMRoleWorker)[vmSize]; ok { - return size, true + // If the VM size has a minimum version requirement, check it + if sizeInfo.MinimumVersion != nil { + clusterVersion, err := version.ParseVersion(v) + if err != nil { + return false + } + return clusterVersion.Gt(sizeInfo.MinimumVersion) || clusterVersion.Eq(sizeInfo.MinimumVersion) } - if size, ok := SupportedVMSizesByRole(VMRoleMaster)[vmSize]; ok { - return size, true - } + // VM size has no version restrictions + return true +} - return api.VMSizeStruct{}, false +func VMSizeFromName(vmSize vms.VMSize) 
(vms.VMSizeStruct, bool) { + return vms.LookupVMSize(vmSize) } diff --git a/pkg/api/validate/vm_test.go b/pkg/api/validate/vm_test.go index d4e392971fa..bbcb762706a 100644 --- a/pkg/api/validate/vm_test.go +++ b/pkg/api/validate/vm_test.go @@ -6,7 +6,7 @@ package validate import ( "testing" - "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" ) func TestDiskSizeIsValid(t *testing.T) { @@ -27,6 +27,7 @@ func TestDiskSizeIsValid(t *testing.T) { }, } { t.Run(tt.name, func(t *testing.T) { + t.Parallel() result := DiskSizeIsValid(tt.diskSize) if result != tt.desiredResult { @@ -38,85 +39,88 @@ func TestDiskSizeIsValid(t *testing.T) { func TestVMSizeIsValid(t *testing.T) { for _, tt := range []struct { - name string - vmSize api.VMSize - requireD2sWorkers bool - isMaster bool - desiredResult bool + name string + vmSize vms.VMSize + isMaster bool + isCI bool + desiredResult bool }{ + // Production mode tests { - name: "vmSize is supported for use in ARO as worker node", - vmSize: api.VMSizeStandardF72sV2, - requireD2sWorkers: false, - isMaster: false, - desiredResult: true, + name: "vmSize is supported for use in ARO as worker node", + vmSize: vms.VMSizeStandardF72sV2, + isMaster: false, + isCI: false, + desiredResult: true, }, { - name: "vmSize is not supported for use in ARO as worker node", - vmSize: api.VMSize("Unsupported_Csv_v6"), - requireD2sWorkers: false, - isMaster: false, - desiredResult: false, + name: "vmSize is not supported for use in ARO as worker node", + vmSize: vms.VMSize("Unsupported_Csv_v6"), + isMaster: false, + isCI: false, + desiredResult: false, }, { - name: "vmSize is supported for use in ARO as master node", - vmSize: api.VMSizeStandardF72sV2, - requireD2sWorkers: false, - isMaster: true, - desiredResult: true, + name: "vmSize is supported for use in ARO as master node", + vmSize: vms.VMSizeStandardF72sV2, + isMaster: true, + isCI: false, + desiredResult: true, }, { - name: "vmSize is not supported for use in ARO as 
master node", - vmSize: api.VMSizeStandardD2sV3, - requireD2sWorkers: false, - isMaster: true, - desiredResult: false, + name: "vmSize is not supported for use in ARO as master node", + vmSize: vms.VMSizeStandardD2sV3, + isMaster: true, + isCI: false, + desiredResult: false, }, { - name: "install requires Standard_D2s workers, worker vmSize is not any supported D2s size", - vmSize: api.VMSizeStandardF72sV2, - requireD2sWorkers: true, - isMaster: false, - desiredResult: false, + name: "Lsv4 vmSize is supported for use in ARO as worker node", + vmSize: vms.VMSizeStandardL8sV4, + isMaster: false, + isCI: false, + desiredResult: true, }, + // CI mode tests { - name: "install requires Standard_D2s workers, worker vmSize is Standard_D2s_v3", - vmSize: api.VMSizeStandardD2sV3, - requireD2sWorkers: true, - isMaster: false, - desiredResult: true, + name: "CI mode: Standard_D2s_v3 is valid as worker", + vmSize: vms.VMSizeStandardD2sV3, + isMaster: false, + isCI: true, + desiredResult: true, }, { - name: "install requires Standard_D2s workers, worker vmSize is Standard_D2s_v4", - vmSize: api.VMSizeStandardD2sV4, - requireD2sWorkers: true, - isMaster: false, - desiredResult: true, + name: "CI mode: Standard_D2s_v4 is valid as worker", + vmSize: vms.VMSizeStandardD2sV4, + isMaster: false, + isCI: true, + desiredResult: true, }, { - name: "install requires Standard_D2s workers, worker vmSize is Standard_D2s_v5", - vmSize: api.VMSizeStandardD2sV5, - requireD2sWorkers: true, - isMaster: false, - desiredResult: true, + name: "CI mode: Standard_D2s_v5 is valid as worker", + vmSize: vms.VMSizeStandardD2sV5, + isMaster: false, + isCI: true, + desiredResult: true, }, { - name: "install requires Standard_D2s_v3 workers, vmSize is is a master", - vmSize: api.VMSizeStandardF72sV2, - requireD2sWorkers: true, - isMaster: true, - desiredResult: true, + name: "CI mode: Standard_F72s_v2 is valid as master", + vmSize: vms.VMSizeStandardF72sV2, + isMaster: true, + isCI: true, + desiredResult: 
true, }, { - name: "Lsv4 vmSize is supported for use in ARO as worker node", - vmSize: api.VMSizeStandardL8sV4, - requireD2sWorkers: false, - isMaster: false, - desiredResult: true, + name: "CI mode: Standard_D4s_v3 is valid as master", + vmSize: vms.VMSizeStandardD4sV3, + isMaster: true, + isCI: true, + desiredResult: true, }, } { t.Run(tt.name, func(t *testing.T) { - result := VMSizeIsValid(tt.vmSize, tt.requireD2sWorkers, tt.isMaster) + t.Parallel() + result := VMSizeIsValid(tt.vmSize, tt.isMaster, tt.isCI) if result != tt.desiredResult { t.Errorf("Want %v, got %v", tt.desiredResult, result) @@ -127,453 +131,446 @@ func TestVMSizeIsValid(t *testing.T) { func TestVMSizeIsValidForVersion(t *testing.T) { for _, tt := range []struct { - name string - vmSize api.VMSize - requireD2sWorkers bool - isMaster bool - version string - desiredResult VMValidity + name string + vmSize vms.VMSize + isMaster bool + version string + isCI bool + desiredResult bool }{ // 4.19+ Master/Control Plane VM sizes - DSv6 series { - name: "Standard_D8s_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D8s_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD8sV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D16s_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD16sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D16s_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD16sV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D32s_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD32sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D32s_v6 is valid for 4.19 master", + vmSize: 
vms.VMSizeStandardD32sV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D64s_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD64sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D64s_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD64sV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D96s_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD96sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D96s_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD96sV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, // 4.19+ Master/Control Plane VM sizes - DDSv6 series { - name: "Standard_D8ds_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD8dsV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D8ds_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD8dsV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D16ds_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD16dsV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D16ds_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD16dsV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D32ds_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD32dsV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D32ds_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD32dsV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D64ds_v6 is valid for 
4.19 master", - vmSize: api.VMSizeStandardD64dsV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D64ds_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD64dsV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D96ds_v6 is valid for 4.19 master", - vmSize: api.VMSizeStandardD96dsV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D96ds_v6 is valid for 4.19 master", + vmSize: vms.VMSizeStandardD96dsV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: true, }, // 4.19+ Worker VM sizes - DSv6 series { - name: "Standard_D8s_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D8s_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD8sV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D16s_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD16sV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D16s_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD16sV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D32s_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD32sV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D32s_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD32sV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D64s_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD64sV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: 
"Standard_D64s_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD64sV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D96s_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD96sV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D96s_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD96sV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, // 4.19+ Worker VM sizes - DDSv6 series { - name: "Standard_D8ds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD8dsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D8ds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD8dsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D16ds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD16dsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D16ds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD16dsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D32ds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD32dsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D32ds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD32dsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D64ds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD64dsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D64ds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD64dsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + 
desiredResult: true, }, { - name: "Standard_D96ds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD96dsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D96ds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD96dsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, // 4.19+ Worker VM sizes - DLSv6 series (worker only) { - name: "Standard_D4ls_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD4lsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D4ls_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD4lsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D8ls_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD8lsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D8ls_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD8lsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D16ls_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD16lsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D16ls_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD16lsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D32ls_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD32lsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D32ls_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD32lsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D48ls_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD48lsV6, - 
requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D48ls_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD48lsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D64ls_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD64lsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D64ls_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD64lsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D96ls_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD96lsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D96ls_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD96lsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, // 4.19+ Worker VM sizes - DLDSv6 series (worker only) { - name: "Standard_D4lds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD4ldsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D4lds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD4ldsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D8lds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD8ldsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D8lds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD8ldsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D16lds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD16ldsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: 
"Standard_D16lds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD16ldsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D32lds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD32ldsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D32lds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD32ldsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D48lds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD48ldsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D48lds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD48ldsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D64lds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD64ldsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D64lds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD64ldsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_D96lds_v6 is valid for 4.19 worker", - vmSize: api.VMSizeStandardD96ldsV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_D96lds_v6 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardD96ldsV6, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, // 4.19+ Worker VM sizes - LSv4 series { - name: "Standard_L8s_v4 is valid for 4.19 worker", - vmSize: api.VMSizeStandardL8sV4, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_L8s_v4 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardL8sV4, + isMaster: false, + version: "4.19.0", + isCI: 
false, + desiredResult: true, }, { - name: "Standard_L16s_v4 is valid for 4.19 worker", - vmSize: api.VMSizeStandardL16sV4, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_L16s_v4 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardL16sV4, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_L32s_v4 is valid for 4.19 worker", - vmSize: api.VMSizeStandardL32sV4, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_L32s_v4 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardL32sV4, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_L48s_v4 is valid for 4.19 worker", - vmSize: api.VMSizeStandardL48sV4, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_L48s_v4 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardL48sV4, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_L64s_v4 is valid for 4.19 worker", - vmSize: api.VMSizeStandardL64sV4, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_L64s_v4 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardL64sV4, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_L80s_v4 is valid for 4.19 worker", - vmSize: api.VMSizeStandardL80sV4, - requireD2sWorkers: false, - isMaster: false, - version: "4.19.0", - desiredResult: VMValidityOK, + name: "Standard_L80s_v4 is valid for 4.19 worker", + vmSize: vms.VMSizeStandardL80sV4, + isMaster: false, + version: "4.19.0", + isCI: false, + desiredResult: true, }, // DLSv6 and DLDSv6 are not supported for master/control plane { - name: "Standard_D4ls_v6 is not valid for 4.19 master", - vmSize: api.VMSizeStandardD4lsV6, - 
requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityNotSupportedForRole, + name: "Standard_D4ls_v6 is not valid for 4.19 master", + vmSize: vms.VMSizeStandardD4lsV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: false, }, { - name: "Standard_D4lds_v6 is not valid for 4.19 master", - vmSize: api.VMSizeStandardD4ldsV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.0", - desiredResult: VMValidityNotSupportedForRole, + name: "Standard_D4lds_v6 is not valid for 4.19 master", + vmSize: vms.VMSizeStandardD4ldsV6, + isMaster: true, + version: "4.19.0", + isCI: false, + desiredResult: false, }, // Test older versions (< 4.19) - should not support new v6 instances directly { - name: "Standard_D8s_v6 is not valid for 4.18 master", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.18.0", - desiredResult: VMValidityNotSupportedInVersion, + name: "Standard_D8s_v6 falls back to standard validation for 4.18 master", + vmSize: vms.VMSizeStandardD8sV6, + isMaster: true, + version: "4.18.0", + isCI: false, + desiredResult: false, }, { - name: "Standard_D8s_v6 is not valid for 4.18 worker", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: false, - version: "4.18.0", - desiredResult: VMValidityNotSupportedInVersion, + name: "Standard_D8s_v6 falls back to standard validation for 4.18 worker", + vmSize: vms.VMSizeStandardD8sV6, + isMaster: false, + version: "4.18.0", + isCI: false, + desiredResult: false, }, // Test version edge cases { - name: "Standard_D8s_v6 is valid for 4.19.1 master", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.1", - desiredResult: VMValidityOK, - }, - { - name: "Standard_D8s_v6 is valid for 4.19.1-nightly master", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.19.1-nightly", - desiredResult: VMValidityOK, + name: 
"Standard_D8s_v6 is valid for 4.19.1 master", + vmSize: vms.VMSizeStandardD8sV6, + isMaster: true, + version: "4.19.1", + isCI: false, + desiredResult: true, }, { - name: "Standard_D8s_v6 is valid for 4.20.0 master", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: true, - version: "4.20.0", - desiredResult: VMValidityOK, + name: "Standard_D8s_v6 is valid for 4.20.0 master", + vmSize: vms.VMSizeStandardD8sV6, + isMaster: true, + version: "4.20.0", + isCI: false, + desiredResult: true, }, // Test invalid version strings { - name: "Standard_D8s_v6 with invalid version falls back to old validation", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: true, - version: "invalid.version", - desiredResult: VMValidityOK, + name: "Standard_D8s_v6 with invalid version falls back to old validation", + vmSize: vms.VMSizeStandardD8sV6, + isMaster: true, + version: "invalid.version", + isCI: false, + desiredResult: false, }, { - name: "Standard_D8s_v6 with empty version falls back to old validation", - vmSize: api.VMSizeStandardD8sV6, - requireD2sWorkers: false, - isMaster: true, - version: "", - desiredResult: VMValidityOK, + name: "Standard_D8s_v6 with empty version falls back to old validation", + vmSize: vms.VMSizeStandardD8sV6, + isMaster: true, + version: "", + isCI: false, + desiredResult: false, }, // Test existing VM sizes still work with version validation { - name: "Standard_D8s_v5 is valid for any version as master", - vmSize: api.VMSizeStandardD8sV5, - requireD2sWorkers: false, - isMaster: true, - version: "4.18.0", - desiredResult: VMValidityOK, + name: "Standard_D8s_v5 is valid for any version as master", + vmSize: vms.VMSizeStandardD8sV5, + isMaster: true, + version: "4.18.0", + isCI: false, + desiredResult: true, }, { - name: "Standard_F72s_v2 is valid for any version as worker", - vmSize: api.VMSizeStandardF72sV2, - requireD2sWorkers: false, - isMaster: false, - version: "4.18.0", - desiredResult: VMValidityOK, + 
name: "Standard_F72s_v2 is valid for any version as worker", + vmSize: vms.VMSizeStandardF72sV2, + isMaster: false, + version: "4.18.0", + isCI: false, + desiredResult: true, }, // Test LSv4 instances with older versions (< 4.19) - should not be supported { - name: "Standard_L8s_v4 falls back to standard validation for 4.18 worker", - vmSize: api.VMSizeStandardL8sV4, - requireD2sWorkers: false, - isMaster: false, - version: "4.18.0", - desiredResult: VMValidityNotSupportedInVersion, + name: "Standard_L8s_v4 falls back to standard validation for 4.18 worker", + vmSize: vms.VMSizeStandardL8sV4, + isMaster: false, + version: "4.18.0", + isCI: false, + desiredResult: false, }, { - name: "Standard_L80s_v4 falls back to standard validation for 4.18 worker", - vmSize: api.VMSizeStandardL80sV4, - requireD2sWorkers: false, - isMaster: false, - version: "4.18.0", - desiredResult: VMValidityNotSupportedInVersion, + name: "Standard_L80s_v4 falls back to standard validation for 4.18 worker", + vmSize: vms.VMSizeStandardL80sV4, + isMaster: false, + version: "4.18.0", + isCI: false, + desiredResult: false, }, } { t.Run(tt.name, func(t *testing.T) { - result := VMSizeIsValidForVersion(tt.vmSize, tt.requireD2sWorkers, tt.isMaster, tt.version) + t.Parallel() + result := VMSizeIsValidForVersion(tt.vmSize, tt.isMaster, tt.version, tt.isCI) if result != tt.desiredResult { t.Errorf("Want %v, got %v", tt.desiredResult, result) From bd95b6023c1fa397ea738323677a38e657fa8a69 Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Thu, 16 Apr 2026 17:16:00 +0200 Subject: [PATCH 03/12] [ARO-24603] Remove FeatureRequireD2sWorkers and update callers to use IsCI Remove the FeatureRequireD2sWorkers feature flag and replace all callers with env.IsCI(). Update frontend, operator, cluster tooling, and clusterdata packages to use vms.VMSize types and the new Static() validator signature. 
Use vms.GetCICandidateMasterVMSizes() and vms.GetCICandidateWorkerVMSizes() with shuffle-by-core-tier for cost-effective quota spreading in CI. Co-Authored-By: Claude Opus 4.6 --- pkg/cluster/loadbalancerinternal_test.go | 9 ++- pkg/cluster/validate_test.go | 25 ++++--- pkg/deploy/devconfig.go | 1 - pkg/env/dev.go | 1 - pkg/env/env.go | 1 - pkg/env/zz_generated_feature_enumer.go | 60 +++++++-------- ...enshiftcluster_resize_controlplane_test.go | 3 +- ...penshiftcluster_vmresize_pre_validation.go | 9 ++- ...iftcluster_vmresize_pre_validation_test.go | 15 ++-- .../admin_supportedvmsizes_list_test.go | 21 +++--- pkg/frontend/admin_supportvmsizes_list.go | 10 +-- .../openshiftcluster_preflightvalidation.go | 5 +- ...enshiftcluster_preflightvalidation_test.go | 57 ++++++++------- pkg/frontend/openshiftcluster_putorpatch.go | 5 +- .../openshiftcluster_putorpatch_test.go | 9 ++- pkg/frontend/quota_validation.go | 5 +- pkg/frontend/shared_test.go | 2 +- pkg/frontend/sku_test.go | 11 +-- pkg/frontend/validate.go | 4 +- pkg/operator/controllers/machine/machine.go | 6 ++ pkg/util/cluster/cluster.go | 73 +++++++------------ pkg/util/clusterdata/worker_profile.go | 3 +- pkg/util/clusterdata/worker_profile_test.go | 5 +- 23 files changed, 162 insertions(+), 178 deletions(-) diff --git a/pkg/cluster/loadbalancerinternal_test.go b/pkg/cluster/loadbalancerinternal_test.go index 0e221db963d..c9d0769c926 100644 --- a/pkg/cluster/loadbalancerinternal_test.go +++ b/pkg/cluster/loadbalancerinternal_test.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/util/computeskus" mock_armcompute "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/azuresdk/armcompute" mock_armnetwork "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/azuresdk/armnetwork" @@ -97,7 +98,7 @@ func TestUpdateLoadBalancerZonalNoopAndErrorPaths(t 
*testing.T) { sku.EXPECT().List(gomock.Any(), "location eq eastus", false). Return(func(yield func(*armcompute.ResourceSKU, error) bool) { yield(&armcompute.ResourceSKU{ - Name: pointerutils.ToPtr(string(api.VMSizeStandardD16asV4)), + Name: pointerutils.ToPtr(string(vms.VMSizeStandardD16asV4)), Locations: pointerutils.ToSlicePtr([]string{"eastus"}), LocationInfo: pointerutils.ToSlicePtr([]armcompute.ResourceSKULocationInfo{ { @@ -200,7 +201,7 @@ func TestUpdateLoadBalancerZonalNoopAndErrorPaths(t *testing.T) { LoadBalancerProfile: &api.LoadBalancerProfile{}, }, MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD16asV4, + VMSize: vms.VMSizeStandardD16asV4, SubnetID: "subnetID", }, APIServerProfile: api.APIServerProfile{ @@ -380,7 +381,7 @@ func TestUpdateLoadBalancerZonalMigration(t *testing.T) { skus.EXPECT().List(gomock.Any(), "location eq eastus", false). Return(func(yield func(*armcompute.ResourceSKU, error) bool) { yield(&armcompute.ResourceSKU{ - Name: pointerutils.ToPtr(string(api.VMSizeStandardD16asV4)), + Name: pointerutils.ToPtr(string(vms.VMSizeStandardD16asV4)), Locations: pointerutils.ToSlicePtr([]string{"eastus"}), LocationInfo: pointerutils.ToSlicePtr([]armcompute.ResourceSKULocationInfo{ {Zones: pointerutils.ToSlicePtr([]string{"1", "2", "3"})}, @@ -564,7 +565,7 @@ func TestUpdateLoadBalancerZonalMigration(t *testing.T) { LoadBalancerProfile: &api.LoadBalancerProfile{}, }, MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD16asV4, + VMSize: vms.VMSizeStandardD16asV4, SubnetID: "subnetID", }, APIServerProfile: api.APIServerProfile{ diff --git a/pkg/cluster/validate_test.go b/pkg/cluster/validate_test.go index accd6b480a3..947ab06e659 100644 --- a/pkg/cluster/validate_test.go +++ b/pkg/cluster/validate_test.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/env" 
"github.com/Azure/ARO-RP/pkg/util/computeskus" mock_armcompute "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/azuresdk/armcompute" @@ -26,8 +27,8 @@ import ( func TestValidateZones(t *testing.T) { key := "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resourceGroup/providers/Microsoft.RedHatOpenShift/openShiftClusters/resourceName1" - controlPlaneSku := string(api.VMSizeStandardD16asV4) - workerProfileSku := string(api.VMSizeStandardD8asV4) + controlPlaneSku := string(vms.VMSizeStandardD16asV4) + workerProfileSku := string(vms.VMSizeStandardD8asV4) type test struct { name string @@ -51,11 +52,11 @@ func TestValidateZones(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD16asV4, + VMSize: vms.VMSizeStandardD16asV4, }, WorkerProfiles: []api.WorkerProfile{ { - VMSize: api.VMSizeStandardD8asV4, + VMSize: vms.VMSizeStandardD8asV4, }, }, Zones: []string{}, @@ -71,11 +72,11 @@ func TestValidateZones(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD16asV4, + VMSize: vms.VMSizeStandardD16asV4, }, WorkerProfiles: []api.WorkerProfile{ { - VMSize: api.VMSizeStandardD8asV4, + VMSize: vms.VMSizeStandardD8asV4, }, }, Zones: []string{"1", "2", "3"}, @@ -91,11 +92,11 @@ func TestValidateZones(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD16asV4, + VMSize: vms.VMSizeStandardD16asV4, }, WorkerProfiles: []api.WorkerProfile{ { - VMSize: api.VMSizeStandardD8asV4, + VMSize: vms.VMSizeStandardD8asV4, }, }, Zones: []string{"1", "2", "3"}, @@ -112,11 +113,11 @@ func TestValidateZones(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD16asV4, + VMSize: vms.VMSizeStandardD16asV4, }, WorkerProfiles: 
[]api.WorkerProfile{ { - VMSize: api.VMSizeStandardD8asV4, + VMSize: vms.VMSizeStandardD8asV4, }, }, Zones: []string{"1", "2", "3", "4"}, @@ -202,11 +203,11 @@ func TestValidateZones(t *testing.T) { Properties: api.OpenShiftClusterProperties{ WorkerProfiles: []api.WorkerProfile{ { - VMSize: api.VMSize(workerProfileSku), + VMSize: vms.VMSize(workerProfileSku), }, }, MasterProfile: api.MasterProfile{ - VMSize: api.VMSize(controlPlaneSku), + VMSize: vms.VMSize(controlPlaneSku), }, }, }, diff --git a/pkg/deploy/devconfig.go b/pkg/deploy/devconfig.go index 85dd06da061..c4c378c10c9 100644 --- a/pkg/deploy/devconfig.go +++ b/pkg/deploy/devconfig.go @@ -137,7 +137,6 @@ func DevConfig(_env env.Core) (*Config, error) { "DisableDenyAssignments", "DisableSignedCertificates", "EnableDevelopmentAuthorizer", - "RequireD2sWorkers", "DisableReadinessDelay", "RequireOIDCStorageWebEndpoint", "UseMockMsiRp", diff --git a/pkg/env/dev.go b/pkg/env/dev.go index 81c09bd813b..79e2b9bc1cf 100644 --- a/pkg/env/dev.go +++ b/pkg/env/dev.go @@ -39,7 +39,6 @@ func newDev(ctx context.Context, log *logrus.Entry, component ServiceName) (Inte for _, feature := range []Feature{ FeatureDisableDenyAssignments, FeatureDisableSignedCertificates, - FeatureRequireD2sWorkers, FeatureDisableReadinessDelay, FeatureRequireOIDCStorageWebEndpoint, FeatureUseMockMsiRp, diff --git a/pkg/env/env.go b/pkg/env/env.go index 0d39693c327..f83a7135f2d 100644 --- a/pkg/env/env.go +++ b/pkg/env/env.go @@ -39,7 +39,6 @@ const ( FeatureDisableDenyAssignments Feature = iota FeatureDisableSignedCertificates FeatureEnableDevelopmentAuthorizer - FeatureRequireD2sWorkers FeatureDisableReadinessDelay FeatureRequireOIDCStorageWebEndpoint FeatureUseMockMsiRp diff --git a/pkg/env/zz_generated_feature_enumer.go b/pkg/env/zz_generated_feature_enumer.go index 5b56e39622b..1205c7bed1e 100644 --- a/pkg/env/zz_generated_feature_enumer.go +++ b/pkg/env/zz_generated_feature_enumer.go @@ -7,11 +7,11 @@ import ( "strings" ) -const _FeatureName 
= "FeatureDisableDenyAssignmentsFeatureDisableSignedCertificatesFeatureEnableDevelopmentAuthorizerFeatureRequireD2sWorkersFeatureDisableReadinessDelayFeatureRequireOIDCStorageWebEndpointFeatureUseMockMsiRpFeatureEnableMISEFeatureEnforceMISEFeatureEnableClusterExpandedAvailabilityZones" +const _FeatureName = "FeatureDisableDenyAssignmentsFeatureDisableSignedCertificatesFeatureEnableDevelopmentAuthorizerFeatureDisableReadinessDelayFeatureRequireOIDCStorageWebEndpointFeatureUseMockMsiRpFeatureEnableMISEFeatureEnforceMISEFeatureEnableClusterExpandedAvailabilityZones" -var _FeatureIndex = [...]uint16{0, 29, 61, 95, 119, 147, 183, 202, 219, 237, 282} +var _FeatureIndex = [...]uint16{0, 29, 61, 95, 123, 159, 178, 195, 213, 258} -const _FeatureLowerName = "featuredisabledenyassignmentsfeaturedisablesignedcertificatesfeatureenabledevelopmentauthorizerfeaturerequired2sworkersfeaturedisablereadinessdelayfeaturerequireoidcstoragewebendpointfeatureusemockmsirpfeatureenablemisefeatureenforcemisefeatureenableclusterexpandedavailabilityzones" +const _FeatureLowerName = "featuredisabledenyassignmentsfeaturedisablesignedcertificatesfeatureenabledevelopmentauthorizerfeaturedisablereadinessdelayfeaturerequireoidcstoragewebendpointfeatureusemockmsirpfeatureenablemisefeatureenforcemisefeatureenableclusterexpandedavailabilityzones" func (i Feature) String() string { if i < 0 || i >= Feature(len(_FeatureIndex)-1) { @@ -27,16 +27,15 @@ func _FeatureNoOp() { _ = x[FeatureDisableDenyAssignments-(0)] _ = x[FeatureDisableSignedCertificates-(1)] _ = x[FeatureEnableDevelopmentAuthorizer-(2)] - _ = x[FeatureRequireD2sWorkers-(3)] - _ = x[FeatureDisableReadinessDelay-(4)] - _ = x[FeatureRequireOIDCStorageWebEndpoint-(5)] - _ = x[FeatureUseMockMsiRp-(6)] - _ = x[FeatureEnableMISE-(7)] - _ = x[FeatureEnforceMISE-(8)] - _ = x[FeatureEnableClusterExpandedAvailabilityZones-(9)] + _ = x[FeatureDisableReadinessDelay-(3)] + _ = x[FeatureRequireOIDCStorageWebEndpoint-(4)] + _ = x[FeatureUseMockMsiRp-(5)] + 
_ = x[FeatureEnableMISE-(6)] + _ = x[FeatureEnforceMISE-(7)] + _ = x[FeatureEnableClusterExpandedAvailabilityZones-(8)] } -var _FeatureValues = []Feature{FeatureDisableDenyAssignments, FeatureDisableSignedCertificates, FeatureEnableDevelopmentAuthorizer, FeatureRequireD2sWorkers, FeatureDisableReadinessDelay, FeatureRequireOIDCStorageWebEndpoint, FeatureUseMockMsiRp, FeatureEnableMISE, FeatureEnforceMISE, FeatureEnableClusterExpandedAvailabilityZones} +var _FeatureValues = []Feature{FeatureDisableDenyAssignments, FeatureDisableSignedCertificates, FeatureEnableDevelopmentAuthorizer, FeatureDisableReadinessDelay, FeatureRequireOIDCStorageWebEndpoint, FeatureUseMockMsiRp, FeatureEnableMISE, FeatureEnforceMISE, FeatureEnableClusterExpandedAvailabilityZones} var _FeatureNameToValueMap = map[string]Feature{ _FeatureName[0:29]: FeatureDisableDenyAssignments, @@ -45,33 +44,30 @@ var _FeatureNameToValueMap = map[string]Feature{ _FeatureLowerName[29:61]: FeatureDisableSignedCertificates, _FeatureName[61:95]: FeatureEnableDevelopmentAuthorizer, _FeatureLowerName[61:95]: FeatureEnableDevelopmentAuthorizer, - _FeatureName[95:119]: FeatureRequireD2sWorkers, - _FeatureLowerName[95:119]: FeatureRequireD2sWorkers, - _FeatureName[119:147]: FeatureDisableReadinessDelay, - _FeatureLowerName[119:147]: FeatureDisableReadinessDelay, - _FeatureName[147:183]: FeatureRequireOIDCStorageWebEndpoint, - _FeatureLowerName[147:183]: FeatureRequireOIDCStorageWebEndpoint, - _FeatureName[183:202]: FeatureUseMockMsiRp, - _FeatureLowerName[183:202]: FeatureUseMockMsiRp, - _FeatureName[202:219]: FeatureEnableMISE, - _FeatureLowerName[202:219]: FeatureEnableMISE, - _FeatureName[219:237]: FeatureEnforceMISE, - _FeatureLowerName[219:237]: FeatureEnforceMISE, - _FeatureName[237:282]: FeatureEnableClusterExpandedAvailabilityZones, - _FeatureLowerName[237:282]: FeatureEnableClusterExpandedAvailabilityZones, + _FeatureName[95:123]: FeatureDisableReadinessDelay, + _FeatureLowerName[95:123]: 
FeatureDisableReadinessDelay, + _FeatureName[123:159]: FeatureRequireOIDCStorageWebEndpoint, + _FeatureLowerName[123:159]: FeatureRequireOIDCStorageWebEndpoint, + _FeatureName[159:178]: FeatureUseMockMsiRp, + _FeatureLowerName[159:178]: FeatureUseMockMsiRp, + _FeatureName[178:195]: FeatureEnableMISE, + _FeatureLowerName[178:195]: FeatureEnableMISE, + _FeatureName[195:213]: FeatureEnforceMISE, + _FeatureLowerName[195:213]: FeatureEnforceMISE, + _FeatureName[213:258]: FeatureEnableClusterExpandedAvailabilityZones, + _FeatureLowerName[213:258]: FeatureEnableClusterExpandedAvailabilityZones, } var _FeatureNames = []string{ _FeatureName[0:29], _FeatureName[29:61], _FeatureName[61:95], - _FeatureName[95:119], - _FeatureName[119:147], - _FeatureName[147:183], - _FeatureName[183:202], - _FeatureName[202:219], - _FeatureName[219:237], - _FeatureName[237:282], + _FeatureName[95:123], + _FeatureName[123:159], + _FeatureName[159:178], + _FeatureName[178:195], + _FeatureName[195:213], + _FeatureName[213:258], } // FeatureString retrieves an enum value from the enum constants string name. 
diff --git a/pkg/frontend/admin_openshiftcluster_resize_controlplane_test.go b/pkg/frontend/admin_openshiftcluster_resize_controlplane_test.go index d4152097bbd..e471abb4717 100644 --- a/pkg/frontend/admin_openshiftcluster_resize_controlplane_test.go +++ b/pkg/frontend/admin_openshiftcluster_resize_controlplane_test.go @@ -26,6 +26,7 @@ import ( machinev1beta1 "github.com/openshift/api/machine/v1beta1" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/env" "github.com/Azure/ARO-RP/pkg/frontend/adminactions" "github.com/Azure/ARO-RP/pkg/metrics/noop" @@ -608,7 +609,7 @@ func TestAdminResizeControlPlane(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, ClusterProfile: api.ClusterProfile{ ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", mockSubID), diff --git a/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation.go b/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation.go index 6923cb88cbb..e1d0ffe6533 100644 --- a/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation.go +++ b/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation.go @@ -23,6 +23,7 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" "github.com/Azure/ARO-RP/pkg/database/cosmosdb" "github.com/Azure/ARO-RP/pkg/env" @@ -193,13 +194,13 @@ func defaultValidateResizeQuota(ctx context.Context, environment env.Interface, // capacity — without a capacity reservation, AllocationFailed errors can only // be detected at ARM PUT time. 
func checkResizeComputeQuota(ctx context.Context, spComputeUsage compute.UsageClient, location, currentVMSize, desiredVMSize string) error { - newSizeStruct, ok := validate.VMSizeFromName(api.VMSize(desiredVMSize)) + newSizeStruct, ok := validate.VMSizeFromName(vms.VMSize(desiredVMSize)) if !ok { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "vmSize", fmt.Sprintf("The provided VM SKU '%s' is not supported.", desiredVMSize)) } - currentSizeStruct, ok := validate.VMSizeFromName(api.VMSize(currentVMSize)) + currentSizeStruct, ok := validate.VMSizeFromName(vms.VMSize(currentVMSize)) if !ok { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "vmSize", fmt.Sprintf("The current VM SKU '%s' could not be resolved.", currentVMSize)) @@ -219,8 +220,8 @@ func checkResizeComputeQuota(ctx context.Context, spComputeUsage compute.UsageCl totalAdditionalRegionalCores := max((newSizeStruct.CoreCount-currentSizeStruct.CoreCount)*api.ControlPlaneNodeCount, 0) requiredByQuota := map[string]int{ - newSizeStruct.Family: totalAdditionalCores, - "cores": totalAdditionalRegionalCores, + string(newSizeStruct.Family): totalAdditionalCores, + "cores": totalAdditionalRegionalCores, } usages, err := spComputeUsage.List(ctx, location) diff --git a/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation_test.go b/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation_test.go index 8899e8710a9..ac9b5b27dac 100644 --- a/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation_test.go +++ b/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation_test.go @@ -26,6 +26,7 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/env" "github.com/Azure/ARO-RP/pkg/frontend/adminactions" "github.com/Azure/ARO-RP/pkg/metrics/noop" @@ -174,7 +175,7 @@ func TestPreResizeControlPlaneVMsValidation(t *testing.T) { 
Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, ClusterProfile: api.ClusterProfile{ ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", mockSubID), @@ -226,7 +227,7 @@ func TestPreResizeControlPlaneVMsValidation(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, ClusterProfile: api.ClusterProfile{ ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", mockSubID), @@ -261,7 +262,7 @@ func TestPreResizeControlPlaneVMsValidation(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, ClusterProfile: api.ClusterProfile{ ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", mockSubID), @@ -315,7 +316,7 @@ func TestPreResizeControlPlaneVMsValidation(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, ClusterProfile: api.ClusterProfile{ ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", mockSubID), @@ -340,7 +341,7 @@ func TestPreResizeControlPlaneVMsValidation(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, ClusterProfile: api.ClusterProfile{ ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", mockSubID), @@ -379,7 +380,7 @@ func TestPreResizeControlPlaneVMsValidation(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: 
api.VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, ClusterProfile: api.ClusterProfile{ ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", mockSubID), @@ -438,7 +439,7 @@ func TestPreResizeControlPlaneVMsValidation(t *testing.T) { Location: "eastus", Properties: api.OpenShiftClusterProperties{ MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD8sV3, + VMSize: vms.VMSizeStandardD8sV3, }, ClusterProfile: api.ClusterProfile{ ResourceGroupID: fmt.Sprintf("/subscriptions/%s/resourceGroups/test-cluster", mockSubID), diff --git a/pkg/frontend/admin_supportedvmsizes_list_test.go b/pkg/frontend/admin_supportedvmsizes_list_test.go index 60c84a6be4a..39d07553ab7 100644 --- a/pkg/frontend/admin_supportedvmsizes_list_test.go +++ b/pkg/frontend/admin_supportedvmsizes_list_test.go @@ -9,44 +9,43 @@ import ( "github.com/go-test/deep" - "github.com/Azure/ARO-RP/pkg/api" - "github.com/Azure/ARO-RP/pkg/api/validate" + "github.com/Azure/ARO-RP/pkg/api/util/vms" utilerror "github.com/Azure/ARO-RP/test/util/error" ) func TestSupportedvmsizes(t *testing.T) { - mastervmsizes := validate.SupportedVMSizesByRole(validate.VMRoleMaster) - workervmsizes := validate.SupportedVMSizesByRole(validate.VMRoleWorker) + mastervmsizes := vms.SupportedVMSizesByRole[vms.VMRoleMaster] + workervmsizes := vms.SupportedVMSizesByRole[vms.VMRoleWorker] type test struct { name string - vmRole string - wantResponse map[api.VMSize]api.VMSizeStruct + vmRole vms.VMRole + wantResponse map[vms.VMSize]vms.VMSizeStruct wantError string } for _, tt := range []*test{ { name: "vmRole is invalid", - vmRole: "invalidVMRole", + vmRole: vms.VMRole("invalidVMRole"), wantError: `400: InvalidParameter: : The provided vmRole 'invalidVMRole' is invalid. vmRole can only be master or worker`, wantResponse: nil, }, { name: "vmRole is empty", - vmRole: "", + vmRole: vms.VMRole(""), wantError: `400: InvalidParameter: : The provided vmRole '' is invalid. 
vmRole can only be master or worker`, wantResponse: nil, }, { name: "master as vmRole", - vmRole: "master", + vmRole: vms.VMRoleMaster, wantError: "", wantResponse: mastervmsizes, }, { name: "worker as vmRole", - vmRole: "worker", + vmRole: vms.VMRoleWorker, wantError: "", wantResponse: workervmsizes, }, @@ -56,7 +55,7 @@ func TestSupportedvmsizes(t *testing.T) { gotResponse, err := f.supportedVMSizesForRole(tt.vmRole) utilerror.AssertErrorMessage(t, err, tt.wantError) if gotResponse != nil || tt.wantResponse != nil { - v := map[api.VMSize]api.VMSizeStruct{} + v := map[vms.VMSize]vms.VMSizeStruct{} err = json.Unmarshal(gotResponse, &v) if err != nil { t.Error(err) diff --git a/pkg/frontend/admin_supportvmsizes_list.go b/pkg/frontend/admin_supportvmsizes_list.go index 81e577d4ebe..0a82a452184 100644 --- a/pkg/frontend/admin_supportvmsizes_list.go +++ b/pkg/frontend/admin_supportvmsizes_list.go @@ -11,23 +11,23 @@ import ( "github.com/sirupsen/logrus" "github.com/Azure/ARO-RP/pkg/api" - "github.com/Azure/ARO-RP/pkg/api/validate" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/frontend/middleware" ) func (f *frontend) supportedvmsizes(w http.ResponseWriter, r *http.Request) { ctx := r.Context() log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry) - vmRole := r.URL.Query().Get("vmRole") + vmRole := vms.VMRole(r.URL.Query().Get("vmRole")) b, err := f.supportedVMSizesForRole(vmRole) reply(log, w, nil, b, err) } -func (f *frontend) supportedVMSizesForRole(vmRole string) ([]byte, error) { - if vmRole != validate.VMRoleMaster && vmRole != validate.VMRoleWorker { +func (f *frontend) supportedVMSizesForRole(vmRole vms.VMRole) ([]byte, error) { + if vmRole != vms.VMRoleMaster && vmRole != vms.VMRoleWorker { return nil, api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "", fmt.Sprintf("The provided vmRole '%s' is invalid. 
vmRole can only be master or worker", vmRole)) } - vmsizes := validate.SupportedVMSizesByRole(vmRole) + vmsizes := vms.SupportedVMSizesByRole[vmRole] b, err := json.MarshalIndent(vmsizes, "", " ") if err != nil { return b, err diff --git a/pkg/frontend/openshiftcluster_preflightvalidation.go b/pkg/frontend/openshiftcluster_preflightvalidation.go index 4546d94f4f1..0c4bacf09ef 100644 --- a/pkg/frontend/openshiftcluster_preflightvalidation.go +++ b/pkg/frontend/openshiftcluster_preflightvalidation.go @@ -14,7 +14,6 @@ import ( "github.com/Azure/ARO-RP/pkg/api" "github.com/Azure/ARO-RP/pkg/database/cosmosdb" - "github.com/Azure/ARO-RP/pkg/env" "github.com/Azure/ARO-RP/pkg/frontend/middleware" "github.com/Azure/ARO-RP/pkg/util/version" ) @@ -134,7 +133,7 @@ func (f *frontend) _preflightValidation(ctx context.Context, log *logrus.Entry, } ext := converter.ToExternal(oc) - if err = staticValidator.Static(ext, nil, f.env.Location(), f.env.Domain(), f.env.FeatureIsSet(env.FeatureRequireD2sWorkers), version.InstallArchitectureVersion, resourceID); err != nil { + if err = staticValidator.Static(ext, nil, f.env.IsCI(), f.env.Location(), f.env.Domain(), version.InstallArchitectureVersion, resourceID); err != nil { return api.ValidationResult{ Status: api.ValidationStatusFailed, Error: &api.CloudErrorBody{ @@ -143,7 +142,7 @@ func (f *frontend) _preflightValidation(ctx context.Context, log *logrus.Entry, } } } else { - if err := staticValidator.Static(ext, doc.OpenShiftCluster, f.env.Location(), f.env.Domain(), f.env.FeatureIsSet(env.FeatureRequireD2sWorkers), version.InstallArchitectureVersion, resourceID); err != nil { + if err := staticValidator.Static(ext, doc.OpenShiftCluster, f.env.IsCI(), f.env.Location(), f.env.Domain(), version.InstallArchitectureVersion, resourceID); err != nil { return api.ValidationResult{ Status: api.ValidationStatusFailed, Error: &api.CloudErrorBody{ diff --git a/pkg/frontend/openshiftcluster_preflightvalidation_test.go 
b/pkg/frontend/openshiftcluster_preflightvalidation_test.go index cb2a3df2038..f90eefb68b9 100644 --- a/pkg/frontend/openshiftcluster_preflightvalidation_test.go +++ b/pkg/frontend/openshiftcluster_preflightvalidation_test.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/metrics/noop" "github.com/Azure/ARO-RP/pkg/util/version" testdatabase "github.com/Azure/ARO-RP/test/database" @@ -88,9 +89,9 @@ func TestPreflightValidation(t *testing.T) { preflightPayload := []byte(fmt.Sprintf(preflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, version.DefaultInstallStream.Version.String(), - mockSubID, mockSubID, netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + mockSubID, mockSubID, netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -252,9 +253,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(managedIdentityClusterPreflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, clusterMSI, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, defaultVersion, - platformIdentities, netProfile, netProfile, api.VMSizeStandardD32sV3, 
masterSub, + platformIdentities, netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -279,9 +280,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(managedIdentityClusterPreflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, "", defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, defaultVersion, - platformIdentities, netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + platformIdentities, netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -309,9 +310,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(managedIdentityClusterPreflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, "", defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, 
defaultVersion, - "", netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + "", netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -339,9 +340,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(managedIdentityClusterPreflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, clusterMSI, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, defaultVersion, - missingPlatformIdentities, netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + missingPlatformIdentities, netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -370,9 +371,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(managedIdentityClusterPreflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, 
location, clusterMSI, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, defaultVersion, - incorrectPlatformIdentities, netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + incorrectPlatformIdentities, netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -401,9 +402,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(managedIdentityClusterPreflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, clusterMSI, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, defaultVersion, - fmt.Sprintf(extraPlatformIdentities, ""), netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + fmt.Sprintf(extraPlatformIdentities, ""), netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -560,9 +561,9 @@ func TestPreflightValidation(t *testing.T) { `, apiVersion, clusterId, 
api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, - mockSubID, mockSubID, netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + mockSubID, mockSubID, netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -609,7 +610,7 @@ func TestPreflightValidation(t *testing.T) { ServiceCIDR: netProfile, }, MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD32sV3, + VMSize: vms.VMSizeStandardD32sV3, SubnetID: masterSub, DiskEncryptionSetID: encryptionSet, EncryptionAtHost: api.EncryptionAtHostEnabled, @@ -617,7 +618,7 @@ func TestPreflightValidation(t *testing.T) { WorkerProfiles: []api.WorkerProfile{ { Name: api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, - VMSize: api.VMSizeStandardD32sV3, + VMSize: vms.VMSizeStandardD32sV3, DiskSizeGB: api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, EncryptionAtHost: api.EncryptionAtHostEnabled, SubnetID: workerSub, @@ -639,9 +640,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(preflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, version.DefaultInstallStream.Version.String(), - 
mockSubID, mockSubID, netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + mockSubID, mockSubID, netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -765,7 +766,7 @@ func TestPreflightValidation(t *testing.T) { ServiceCIDR: netProfile, }, MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD32sV3, + VMSize: vms.VMSizeStandardD32sV3, SubnetID: masterSub, DiskEncryptionSetID: encryptionSet, EncryptionAtHost: api.EncryptionAtHostEnabled, @@ -773,7 +774,7 @@ func TestPreflightValidation(t *testing.T) { WorkerProfiles: []api.WorkerProfile{ { Name: api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, - VMSize: api.VMSizeStandardD32sV3, + VMSize: vms.VMSizeStandardD32sV3, DiskSizeGB: api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, EncryptionAtHost: api.EncryptionAtHostEnabled, SubnetID: workerSub, @@ -795,9 +796,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(managedIdentityClusterPreflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, clusterMSI, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, defaultVersion, - fmt.Sprintf(extraPlatformIdentities, ""), netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + fmt.Sprintf(extraPlatformIdentities, ""), netProfile, netProfile, vms.VMSizeStandardD32sV3, 
masterSub, api.EncryptionAtHostEnabled, encryptionSet, - api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, @@ -889,7 +890,7 @@ func TestPreflightValidation(t *testing.T) { ServiceCIDR: netProfile, }, MasterProfile: api.MasterProfile{ - VMSize: api.VMSizeStandardD32sV3, + VMSize: vms.VMSizeStandardD32sV3, SubnetID: masterSub, DiskEncryptionSetID: encryptionSet, EncryptionAtHost: api.EncryptionAtHostEnabled, @@ -897,7 +898,7 @@ func TestPreflightValidation(t *testing.T) { WorkerProfiles: []api.WorkerProfile{ { Name: api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, - VMSize: api.VMSizeStandardD32sV3, + VMSize: vms.VMSizeStandardD32sV3, DiskSizeGB: api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, EncryptionAtHost: api.EncryptionAtHostEnabled, SubnetID: workerSub, @@ -919,9 +920,9 @@ func TestPreflightValidation(t *testing.T) { []byte(fmt.Sprintf(managedIdentityClusterPreflightPayloadTemplate, apiVersion, clusterId, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Name, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Type, location, clusterMSI, defaultProfile, resourceGroup, api.EncryptionAtHostEnabled, defaultVersion, - fmt.Sprintf(extraPlatformIdentities, upgradeableTo), netProfile, netProfile, api.VMSizeStandardD32sV3, masterSub, + fmt.Sprintf(extraPlatformIdentities, upgradeableTo), netProfile, netProfile, vms.VMSizeStandardD32sV3, masterSub, api.EncryptionAtHostEnabled, encryptionSet, - 
api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, api.VMSizeStandardD32sV3, + api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Name, vms.VMSizeStandardD32sV3, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].DiskSizeGB, api.EncryptionAtHostEnabled, workerSub, api.ExampleOpenShiftClusterDocument().OpenShiftCluster.Properties.WorkerProfiles[0].Count, diff --git a/pkg/frontend/openshiftcluster_putorpatch.go b/pkg/frontend/openshiftcluster_putorpatch.go index 61e91a7fe17..786173811da 100644 --- a/pkg/frontend/openshiftcluster_putorpatch.go +++ b/pkg/frontend/openshiftcluster_putorpatch.go @@ -21,7 +21,6 @@ import ( "github.com/Azure/ARO-RP/pkg/api/admin" "github.com/Azure/ARO-RP/pkg/api/v20240812preview" "github.com/Azure/ARO-RP/pkg/database/cosmosdb" - "github.com/Azure/ARO-RP/pkg/env" "github.com/Azure/ARO-RP/pkg/frontend/middleware" "github.com/Azure/ARO-RP/pkg/operator" "github.com/Azure/ARO-RP/pkg/util/version" @@ -263,7 +262,7 @@ func (f *frontend) _putOrPatchOpenShiftCluster(ctx context.Context, log *logrus. 
return nil, err } } else { - err = putOrPatchClusterParameters.staticValidator.Static(ext, doc.OpenShiftCluster, f.env.Location(), f.env.Domain(), f.env.FeatureIsSet(env.FeatureRequireD2sWorkers), version.InstallArchitectureVersion, putOrPatchClusterParameters.path) + err = putOrPatchClusterParameters.staticValidator.Static(ext, doc.OpenShiftCluster, f.env.IsCI(), f.env.Location(), f.env.Domain(), version.InstallArchitectureVersion, putOrPatchClusterParameters.path) if err != nil { return nil, err } @@ -445,7 +444,7 @@ func validateIdentityTenantID(cluster *api.OpenShiftCluster, identityTenantID st } func (f *frontend) ValidateNewCluster(ctx context.Context, subscription *api.SubscriptionDocument, cluster *api.OpenShiftCluster, staticValidator api.OpenShiftClusterStaticValidator, ext interface{}, path string) error { - err := staticValidator.Static(ext, nil, f.env.Location(), f.env.Domain(), f.env.FeatureIsSet(env.FeatureRequireD2sWorkers), version.InstallArchitectureVersion, path) + err := staticValidator.Static(ext, nil, f.env.IsCI(), f.env.Location(), f.env.Domain(), version.InstallArchitectureVersion, path) if err != nil { return err } diff --git a/pkg/frontend/openshiftcluster_putorpatch_test.go b/pkg/frontend/openshiftcluster_putorpatch_test.go index 2335969c4c5..1ae89ffdd2f 100644 --- a/pkg/frontend/openshiftcluster_putorpatch_test.go +++ b/pkg/frontend/openshiftcluster_putorpatch_test.go @@ -17,6 +17,7 @@ import ( "github.com/Azure/ARO-RP/pkg/api" "github.com/Azure/ARO-RP/pkg/api/admin" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/v20240812preview" "github.com/Azure/ARO-RP/pkg/frontend/middleware" "github.com/Azure/ARO-RP/pkg/metrics/noop" @@ -318,14 +319,14 @@ func getOpenShiftClusterDocument(provisioningState, lastProvisioningState, faile }, MasterProfile: api.MasterProfile{ EncryptionAtHost: api.EncryptionAtHostDisabled, - VMSize: api.VMSize(mockVMSize), + VMSize: vms.VMSize(mockVMSize), SubnetID: mockMasterSubnetID, }, 
WorkerProfiles: []api.WorkerProfile{ { Name: "worker", EncryptionAtHost: api.EncryptionAtHostDisabled, - VMSize: api.VMSize(mockVMSize), + VMSize: vms.VMSize(mockVMSize), DiskSizeGB: 128, Count: 3, SubnetID: mockWorkerSubnetID, @@ -1789,14 +1790,14 @@ func TestPutorPatchOpenShiftClusterAdminAPI(t *testing.T) { }, MasterProfile: admin.MasterProfile{ EncryptionAtHost: admin.EncryptionAtHostDisabled, - VMSize: admin.VMSize(mockVMSize), + VMSize: vms.VMSize(mockVMSize), SubnetID: mockMasterSubnetID, }, WorkerProfiles: []admin.WorkerProfile{ { Name: "worker", EncryptionAtHost: admin.EncryptionAtHostDisabled, - VMSize: admin.VMSize(mockVMSize), + VMSize: vms.VMSize(mockVMSize), DiskSizeGB: 128, Count: 3, SubnetID: mockWorkerSubnetID, diff --git a/pkg/frontend/quota_validation.go b/pkg/frontend/quota_validation.go index aa9fb787680..2992e85c263 100644 --- a/pkg/frontend/quota_validation.go +++ b/pkg/frontend/quota_validation.go @@ -9,6 +9,7 @@ import ( "net/http" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" "github.com/Azure/ARO-RP/pkg/env" "github.com/Azure/ARO-RP/pkg/util/azureclient" @@ -22,7 +23,7 @@ type QuotaValidator interface { type quotaValidator struct{} -func addRequiredResources(requiredResources map[string]int, vmSize api.VMSize, count int) error { +func addRequiredResources(requiredResources map[string]int, vmSize vms.VMSize, count int) error { vm, ok := validate.VMSizeFromName(vmSize) if !ok { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "", fmt.Sprintf("The provided VM SKU %s is not supported.", vmSize)) @@ -31,7 +32,7 @@ func addRequiredResources(requiredResources map[string]int, vmSize api.VMSize, c requiredResources["virtualMachines"] += count requiredResources["PremiumDiskCount"] += count - requiredResources[vm.Family] += vm.CoreCount * count + requiredResources[vm.Family.String()] += vm.CoreCount * count requiredResources["cores"] += 
vm.CoreCount * count return nil } diff --git a/pkg/frontend/shared_test.go b/pkg/frontend/shared_test.go index 120356e8fd2..f249b69ad2e 100644 --- a/pkg/frontend/shared_test.go +++ b/pkg/frontend/shared_test.go @@ -101,7 +101,7 @@ type testInfra struct { } func newTestInfra(t *testing.T) *testInfra { - return newTestInfraWithFeatures(t, map[env.Feature]bool{env.FeatureRequireD2sWorkers: false, env.FeatureDisableReadinessDelay: false, env.FeatureEnableMISE: false, env.FeatureEnforceMISE: false}) + return newTestInfraWithFeatures(t, map[env.Feature]bool{env.FeatureDisableReadinessDelay: false, env.FeatureEnableMISE: false, env.FeatureEnforceMISE: false}) } func newTestInfraWithFeatures(t *testing.T, features map[env.Feature]bool) *testInfra { diff --git a/pkg/frontend/sku_test.go b/pkg/frontend/sku_test.go index 6e452b6438f..03cbe1dde35 100644 --- a/pkg/frontend/sku_test.go +++ b/pkg/frontend/sku_test.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" mock_armcompute "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/azuresdk/armcompute" "github.com/Azure/ARO-RP/pkg/util/pointerutils" utilerror "github.com/Azure/ARO-RP/test/util/error" @@ -233,16 +234,16 @@ func TestValidateVMSku(t *testing.T) { Properties: api.OpenShiftClusterProperties{ WorkerProfiles: []api.WorkerProfile{ { - VMSize: api.VMSize(tt.workerProfile1Sku), + VMSize: vms.VMSize(tt.workerProfile1Sku), EncryptionAtHost: tt.workerEncryptionAtHost, }, { - VMSize: api.VMSize(tt.workerProfile2Sku), + VMSize: vms.VMSize(tt.workerProfile2Sku), EncryptionAtHost: tt.workerEncryptionAtHost, }, }, MasterProfile: api.MasterProfile{ - VMSize: api.VMSize(tt.masterProfileSku), + VMSize: vms.VMSize(tt.masterProfileSku), EncryptionAtHost: tt.masterEncryptionAtHost, }, }, @@ -309,10 +310,10 @@ func TestValidateVMSku(t *testing.T) { oc.Properties.WorkerProfiles = nil 
oc.Properties.WorkerProfilesStatus = []api.WorkerProfile{ { - VMSize: api.VMSize(tt.workerProfile1Sku), + VMSize: vms.VMSize(tt.workerProfile1Sku), }, { - VMSize: api.VMSize(tt.workerProfile2Sku), + VMSize: vms.VMSize(tt.workerProfile2Sku), }, } } diff --git a/pkg/frontend/validate.go b/pkg/frontend/validate.go index 16605b3a6b9..bef23b444a5 100644 --- a/pkg/frontend/validate.go +++ b/pkg/frontend/validate.go @@ -17,7 +17,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/ARO-RP/pkg/api" - "github.com/Azure/ARO-RP/pkg/api/validate" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/database/cosmosdb" utilnamespace "github.com/Azure/ARO-RP/pkg/util/namespace" ) @@ -219,7 +219,7 @@ func validateNetworkInterfaceName(nicName string) error { func validateAdminMasterVMSize(vmSize string) error { // check to ensure that the target size is supported as a master size - for k := range validate.SupportedVMSizesByRole(validate.VMRoleMaster) { + for k := range vms.SupportedVMSizesByRole[vms.VMRoleMaster] { if strings.EqualFold(string(k), vmSize) { return nil } diff --git a/pkg/operator/controllers/machine/machine.go b/pkg/operator/controllers/machine/machine.go index 08eb7a85adf..cda9d8e6530 100644 --- a/pkg/operator/controllers/machine/machine.go +++ b/pkg/operator/controllers/machine/machine.go @@ -12,6 +12,7 @@ import ( machinev1beta1 "github.com/openshift/api/machine/v1beta1" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/api/validate" utilmachine "github.com/Azure/ARO-RP/pkg/util/machine" _ "github.com/Azure/ARO-RP/pkg/util/scheme" @@ -44,6 +45,11 @@ func (r *Reconciler) machineValid(ctx context.Context, machine *machinev1beta1.M return []error{fmt.Errorf("machine %s: failed to read provider spec: %v", machine.Name, err)} } + // Validate VM size in machine provider spec + if !validate.VMSizeIsValid(vms.VMSize(machineProviderSpec.VMSize), isMaster, false) { + errs = append(errs, fmt.Errorf("machine 
%s: invalid VM size '%v'", machine.Name, machineProviderSpec.VMSize)) + } + // Validate disk size in machine provider spec if !isMaster && !validate.DiskSizeIsValid(int(machineProviderSpec.OSDisk.DiskSizeGB)) { errs = append(errs, fmt.Errorf("machine %s: invalid disk size '%v'", machine.Name, machineProviderSpec.OSDisk.DiskSizeGB)) diff --git a/pkg/util/cluster/cluster.go b/pkg/util/cluster/cluster.go index 8dd640a2b4c..08a207e230d 100644 --- a/pkg/util/cluster/cluster.go +++ b/pkg/util/cluster/cluster.go @@ -35,6 +35,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" v20250725 "github.com/Azure/ARO-RP/pkg/api/v20250725" mgmtredhatopenshift20250725 "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2025-07-25/redhatopenshift" "github.com/Azure/ARO-RP/pkg/deploy/assets" @@ -74,10 +75,11 @@ type ClusterConfig struct { NoInternet bool `mapstructure:"NO_INTERNET"` MockMSIObjectID string `mapstructure:"MOCK_MSI_OBJECT_ID"` - MasterVMSize string `mapstructure:"MASTER_VM_SIZE"` - WorkerVMSize string `mapstructure:"WORKER_VM_SIZE"` - MasterVMSizes []string `mapstructure:"MASTER_VM_SIZES"` - WorkerVMSizes []string `mapstructure:"WORKER_VM_SIZES"` + MasterVMSize vms.VMSize `mapstructure:"MASTER_VM_SIZE"` + WorkerVMSize vms.VMSize `mapstructure:"WORKER_VM_SIZE"` + // TODO: MAITIU - Do we need to touch this? 
+ CandidateMasterVMSizes []vms.VMSize `mapstructure:"MASTER_VM_SIZES"` + CandidateWorkerVMSizes []vms.VMSize `mapstructure:"WORKER_VM_SIZES"` } func (cc *ClusterConfig) IsLocalDevelopmentMode() bool { @@ -114,22 +116,6 @@ const ( aroClusterIdentityOperatorName = "aro-Cluster" ) -func DefaultMasterVmSizes() []string { - return []string{ - api.VMSizeStandardD8sV5.String(), - api.VMSizeStandardD8sV4.String(), - api.VMSizeStandardD8sV3.String(), - } -} - -func DefaultWorkerVmSizes() []string { - return []string{ - api.VMSizeStandardD4sV5.String(), - api.VMSizeStandardD4sV4.String(), - api.VMSizeStandardD4sV3.String(), - } -} - func insecureLocalClient() *http.Client { return &http.Client{ Transport: &http.Transport{ @@ -140,6 +126,8 @@ func insecureLocalClient() *http.Client { } } +// NewClusterConfigFromEnv should only be used in the context of CI or local +// development. func NewClusterConfigFromEnv() (*ClusterConfig, error) { var conf ClusterConfig viper.AutomaticEnv() @@ -184,28 +172,18 @@ func NewClusterConfigFromEnv() (*ClusterConfig, error) { } // Set VM size defaults only if user hasn't provided any values - if len(conf.MasterVMSizes) == 0 { + if len(conf.CandidateMasterVMSizes) == 0 { if conf.MasterVMSize == "" { - conf.MasterVMSizes = DefaultMasterVmSizes() + conf.CandidateMasterVMSizes = vms.GetCICandidateMasterVMSizes() } else { - conf.MasterVMSizes = []string{conf.MasterVMSize} + conf.CandidateMasterVMSizes = []vms.VMSize{conf.MasterVMSize} } } - if len(conf.WorkerVMSizes) == 0 { + if len(conf.CandidateWorkerVMSizes) == 0 { if conf.WorkerVMSize == "" { - // No explicit worker VM size set - use defaults. - // In local dev mode, use D2s sizes only (RequireD2sWorkers feature flag). 
- if conf.IsLocalDevelopmentMode() { - conf.WorkerVMSizes = []string{ - api.VMSizeStandardD2sV5.String(), - api.VMSizeStandardD2sV4.String(), - api.VMSizeStandardD2sV3.String(), - } - } else { - conf.WorkerVMSizes = DefaultWorkerVmSizes() - } + conf.CandidateWorkerVMSizes = vms.GetCICandidateWorkerVMSizes() } else { - conf.WorkerVMSizes = []string{conf.WorkerVMSize} + conf.CandidateWorkerVMSizes = []vms.VMSize{conf.WorkerVMSize} } } @@ -957,7 +935,7 @@ func (c *Cluster) Delete(ctx context.Context, vnetResourceGroup, clusterName str errs = append(errs, fmt.Errorf("failed to delete cluster: %w", err)) } - if err := c.deleteMiwiRoleAssignments(ctx, vnetResourceGroup); err != nil { + if err := c.deleteWimiRoleAssignments(ctx, vnetResourceGroup); err != nil { c.log.Errorf("Failed to delete workload identity role assignments: %v", err) errs = append(errs, fmt.Errorf("failed to delete workload identity role assignments: %w", err)) } @@ -994,7 +972,7 @@ func (c *Cluster) Delete(ctx context.Context, vnetResourceGroup, clusterName str errs = append(errs, fmt.Errorf("failed to delete cluster: %w", err)) } - if err := c.deleteMiwiRoleAssignments(ctx, vnetResourceGroup); err != nil { + if err := c.deleteWimiRoleAssignments(ctx, vnetResourceGroup); err != nil { c.log.Errorf("Failed to delete workload identity role assignments: %v", err) errs = append(errs, fmt.Errorf("failed to delete workload identity role assignments: %w", err)) } @@ -1159,8 +1137,8 @@ func (c *Cluster) createCluster(ctx context.Context, vnetResourceGroup, clusterN } } - oc.Properties.MasterProfile.VMSize = api.VMSize(c.Config.MasterVMSizes[masterIdx]) - oc.Properties.WorkerProfiles[0].VMSize = api.VMSize(c.Config.WorkerVMSizes[workerIdx]) + oc.Properties.MasterProfile.VMSize = c.Config.CandidateMasterVMSizes[masterIdx] + oc.Properties.WorkerProfiles[0].VMSize = c.Config.CandidateWorkerVMSizes[workerIdx] c.log.Infof("Creating cluster %s with master VM size %s and worker VM size %s", clusterName, 
oc.Properties.MasterProfile.VMSize, oc.Properties.WorkerProfiles[0].VMSize) err = c.openshiftclusters.CreateOrUpdateAndWait(ctx, vnetResourceGroup, clusterName, &oc) @@ -1178,13 +1156,13 @@ func (c *Cluster) createCluster(ctx context.Context, vnetResourceGroup, clusterN case azureerrors.VMProfileWorker: c.log.WithError(err).Errorf("error creating cluster with worker VM size %s, trying next size", oc.Properties.WorkerProfiles[0].VMSize) workerIdx++ - if workerIdx >= len(c.Config.WorkerVMSizes) { + if workerIdx >= len(c.Config.CandidateWorkerVMSizes) { return fmt.Errorf("exhausted all worker VM sizes: %w", err) } case azureerrors.VMProfileMaster: c.log.WithError(err).Errorf("error creating cluster with master VM size %s, trying next size", oc.Properties.MasterProfile.VMSize) masterIdx++ - if masterIdx >= len(c.Config.MasterVMSizes) { + if masterIdx >= len(c.Config.CandidateMasterVMSizes) { return fmt.Errorf("exhausted all master VM sizes: %w", err) } default: @@ -1193,10 +1171,10 @@ func (c *Cluster) createCluster(ctx context.Context, vnetResourceGroup, clusterN c.log.WithError(err).Errorf("error creating cluster with VM sizes (master: %s, worker: %s), cannot determine failing profile", oc.Properties.MasterProfile.VMSize, oc.Properties.WorkerProfiles[0].VMSize) workerIdx++ - if workerIdx >= len(c.Config.WorkerVMSizes) { + if workerIdx >= len(c.Config.CandidateWorkerVMSizes) { workerIdx = 0 masterIdx++ - if masterIdx >= len(c.Config.MasterVMSizes) { + if masterIdx >= len(c.Config.CandidateMasterVMSizes) { return fmt.Errorf("exhausted all VM size combinations: %w", err) } } @@ -1444,7 +1422,6 @@ func (c *Cluster) fixupNSGs(ctx context.Context, vnetResourceGroup, clusterName func (c *Cluster) deleteRoleAssignments(ctx context.Context, vnetResourceGroup, clusterName string) error { if c.Config.UseWorkloadIdentity { c.log.Print("Skipping deletion of service principal role assignments") - return nil } c.log.Print("deleting role assignments") oc, err := 
c.openshiftclusters.Get(ctx, vnetResourceGroup, clusterName) @@ -1481,12 +1458,12 @@ func (c *Cluster) deleteRoleAssignments(ctx context.Context, vnetResourceGroup, return nil } -func (c *Cluster) deleteMiwiRoleAssignments(ctx context.Context, vnetResourceGroup string) error { +func (c *Cluster) deleteWimiRoleAssignments(ctx context.Context, vnetResourceGroup string) error { if !c.Config.UseWorkloadIdentity { - c.log.Print("Skipping deletion of miwi role assignments") + c.log.Print("Skipping deletion of wimi role assignments") return nil } - c.log.Print("deleting miwi role assignments") + c.log.Print("deleting wimi role assignments") var wiRoleSets []api.PlatformWorkloadIdentityRoleSetProperties if err := json.Unmarshal([]byte(c.Config.WorkloadIdentityRoles), &wiRoleSets); err != nil { diff --git a/pkg/util/clusterdata/worker_profile.go b/pkg/util/clusterdata/worker_profile.go index db2423378bb..3163177f7b0 100644 --- a/pkg/util/clusterdata/worker_profile.go +++ b/pkg/util/clusterdata/worker_profile.go @@ -23,6 +23,7 @@ import ( operatorclient "github.com/openshift/client-go/operator/clientset/versioned" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" _ "github.com/Azure/ARO-RP/pkg/util/scheme" ) @@ -79,7 +80,7 @@ func (ce machineClientEnricher) Enrich( continue } - workerProfiles[i].VMSize = api.VMSize(machineProviderSpec.VMSize) + workerProfiles[i].VMSize = vms.VMSize(machineProviderSpec.VMSize) workerProfiles[i].DiskSizeGB = int(machineProviderSpec.OSDisk.DiskSizeGB) workerProfiles[i].SubnetID = fmt.Sprintf( "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s", diff --git a/pkg/util/clusterdata/worker_profile_test.go b/pkg/util/clusterdata/worker_profile_test.go index 2d330a1f841..64fecabc324 100644 --- a/pkg/util/clusterdata/worker_profile_test.go +++ b/pkg/util/clusterdata/worker_profile_test.go @@ -23,6 +23,7 @@ import ( machinefake 
"github.com/openshift/client-go/machine/clientset/versioned/fake" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/api/util/vms" "github.com/Azure/ARO-RP/pkg/util/cmp" "github.com/Azure/ARO-RP/pkg/util/pointerutils" errorHandling "github.com/Azure/ARO-RP/test/util/error" @@ -292,7 +293,7 @@ func validWorkerProfile() []api.WorkerProfile { return []api.WorkerProfile{ { Name: "fake-worker-profile-1", - VMSize: api.VMSizeStandardD4sV3, + VMSize: vms.VMSizeStandardD4sV3, DiskSizeGB: 512, EncryptionAtHost: api.EncryptionAtHostDisabled, SubnetID: workerSubnetID, @@ -300,7 +301,7 @@ func validWorkerProfile() []api.WorkerProfile { }, { Name: "fake-worker-profile-2", - VMSize: api.VMSizeStandardD4sV3, + VMSize: vms.VMSizeStandardD4sV3, DiskSizeGB: 512, EncryptionAtHost: api.EncryptionAtHostDisabled, SubnetID: workerSubnetID, From fafb8731df64b46fbe68d9676b35b0b248118374 Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Thu, 16 Apr 2026 17:17:20 +0200 Subject: [PATCH 04/12] [ARO-24603] Update docs to reference new vms package location Update adding-new-instance-types.md to point to pkg/api/util/vms/ as the new location for VM size definitions. Co-Authored-By: Claude Opus 4.6 --- docs/adding-new-instance-types.md | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/docs/adding-new-instance-types.md b/docs/adding-new-instance-types.md index 3699217959d..3df71ccee09 100644 --- a/docs/adding-new-instance-types.md +++ b/docs/adding-new-instance-types.md @@ -33,29 +33,23 @@ The desired instance types should be free of any restrictions. The subscription ### CLI Method -1) Comment out `FeatureRequireD2sWorkers` from the range of features in `pkg/env/dev.go`. This will allow you to create development clusters with other VM sizes. +1) Follow the usual steps to [deploy a development RP](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md), but don't use the hack script to create a cluster. 
-> __NOTE:__ Please be responsible with your usage of larger VM sizes, as they incur additional cost. - -2) Follow the usual steps to [deploy a development RP](https://github.com/Azure/ARO-RP/blob/master/docs/deploy-development-rp.md), but don't use the hack script to create a cluster. - -3) Follow steps in https://docs.microsoft.com/en-us/azure/openshift/tutorial-create-cluster to create a cluster, specifying `-worker-vm-size` and/or `--master-vm-size` in the `az aro create` step to specify an alternate sku: +2) Follow steps in https://docs.microsoft.com/en-us/azure/openshift/tutorial-create-cluster to create a cluster, specifying `--worker-vm-size` and/or `--master-vm-size` in the `az aro create` step to specify an alternate sku: ~~~ az aro create --resource-group $RESOURCEGROUP --name $CLUSTER --vnet aro-lseries --master-subnet master-subnet --worker-subnet worker-subnet --worker-vm-size "Standard_L8s_v2" ~~~ -4) Once an install with an alternate size is successful, a basic check of cluster health can be conducted, as well as local e2e tests to confirm supportability. +3) Once an install with an alternate size is successful, a basic check of cluster health can be conducted, as well as local e2e tests to confirm supportability. ### Hack scripts method -1) Comment out `FeatureRequireD2sWorkers` from the range of features in `pkg/env/dev.go`. - -2) Start your local RP. If it was already running, restart it to take into account commented lines. +1) Start your local RP. If it was already running, restart it. -3) Use the [hack script to create a cluster.](https://github.com/cadenmarchese/ARO-RP/blob/master/docs/deploy-development-rp.md#run-the-rp-and-create-a-cluster), with MASTER_VM_SIZE and WORKER_VM_SIZE variables set to desired instance size. 
+2) Use the [hack script to create a cluster.](https://github.com/cadenmarchese/ARO-RP/blob/master/docs/deploy-development-rp.md#run-the-rp-and-create-a-cluster), with MASTER_VM_SIZE and WORKER_VM_SIZE variables set to desired instance size. -4) Once an install with an alternate size is successful, a basic check of cluster health can be conducted, as well as local e2e tests to confirm supportability. +3) Once an install with an alternate size is successful, a basic check of cluster health can be conducted, as well as local e2e tests to confirm supportability. ### Post-install method From af23276c35a6d81b05fd2e07c0dfa98c7bcad511 Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Thu, 16 Apr 2026 18:15:39 +0200 Subject: [PATCH 05/12] [ARO-24603] Add focused VM size regression tests Cover the centralized VM-size helpers, CI-only validator paths, and cluster config/enricher behavior so the rebased stack has stronger regression protection before the follow-up fixes. Made-with: Cursor --- .../openshiftcluster_validatestatic_test.go | 39 ++++++ pkg/api/util/vms/sizes_test.go | 94 +++++++++++++++ .../openshiftcluster_validatestatic_test.go | 34 ++++++ .../openshiftcluster_validatestatic_test.go | 34 ++++++ .../openshiftcluster_validatestatic_test.go | 34 ++++++ .../openshiftcluster_validatestatic_test.go | 34 ++++++ pkg/util/cluster/cluster_config_test.go | 107 +++++++++++++++++ pkg/util/clusterdata/worker_profile_test.go | 113 +++++++++++++++++- 8 files changed, 485 insertions(+), 4 deletions(-) create mode 100644 pkg/util/cluster/cluster_config_test.go diff --git a/pkg/api/admin/openshiftcluster_validatestatic_test.go b/pkg/api/admin/openshiftcluster_validatestatic_test.go index 40dc61af6cf..3248c4e1b75 100644 --- a/pkg/api/admin/openshiftcluster_validatestatic_test.go +++ b/pkg/api/admin/openshiftcluster_validatestatic_test.go @@ -814,6 +814,45 @@ func TestOpenShiftClusterStaticValidateDelta(t *testing.T) { } } +func TestOpenShiftClusterStaticValidateIsCIParity(t 
*testing.T) { + tests := []struct { + name string + current *api.OpenShiftCluster + oc *OpenShiftCluster + isCI bool + wantErr string + }{ + { + name: "valid update in ci", + current: &api.OpenShiftCluster{}, + oc: (&openShiftClusterConverter{}).ToExternal(&api.OpenShiftCluster{}).(*OpenShiftCluster), + isCI: true, + }, + { + name: "admin create disallowed in ci", + current: nil, + oc: &OpenShiftCluster{}, + isCI: true, + wantErr: "400: RequestNotAllowed: : Admin API does not allow cluster creation.", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := (&openShiftClusterStaticValidator{}).Static(tt.oc, tt.current, tt.isCI, "", "", api.ArchitectureVersionV2, "") + if tt.wantErr == "" { + if err != nil { + t.Fatal(err) + } + return + } + if err == nil || err.Error() != tt.wantErr { + t.Fatalf("got err %v, want %q", err, tt.wantErr) + } + }) + } +} + // func toDate(t time.Time) *date.Time { // return &date.Time{Time: t} // } diff --git a/pkg/api/util/vms/sizes_test.go b/pkg/api/util/vms/sizes_test.go index 9eb083afcea..75f3665e8d5 100644 --- a/pkg/api/util/vms/sizes_test.go +++ b/pkg/api/util/vms/sizes_test.go @@ -22,3 +22,97 @@ func TestMinWorkerVMSizesAreSupported(t *testing.T) { } } } + +func TestLookupVMSizeFindsKnownAndUnknownSizes(t *testing.T) { + tests := []struct { + name string + vmSize VMSize + wantFound bool + wantCoreCount int + wantMinVersion19 bool + }{ + { + name: "finds production worker size", + vmSize: VMSizeStandardD4sV3, + wantFound: true, + wantCoreCount: 4, + }, + { + name: "finds CI-only worker size with minimum version", + vmSize: VMSizeStandardD2sV6, + wantFound: true, + wantCoreCount: 2, + wantMinVersion19: true, + }, + { + name: "finds CI-only master size", + vmSize: VMSizeStandardD4sV4, + wantFound: true, + wantCoreCount: 4, + }, + { + name: "unknown size is not found", + vmSize: VMSize("Standard_NotARealSize"), + wantFound: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + got, found := LookupVMSize(tt.vmSize) + if found != tt.wantFound { + t.Fatalf("LookupVMSize(%q) found=%v, want %v", tt.vmSize, found, tt.wantFound) + } + if !tt.wantFound { + return + } + if got.CoreCount != tt.wantCoreCount { + t.Fatalf("LookupVMSize(%q) coreCount=%d, want %d", tt.vmSize, got.CoreCount, tt.wantCoreCount) + } + if tt.wantMinVersion19 && !got.MinimumVersion.Eq(ver419) { + t.Fatalf("LookupVMSize(%q) minimumVersion=%v, want %v", tt.vmSize, got.MinimumVersion, ver419) + } + }) + } +} + +func TestGetCICandidateMasterVMSizesMatchMinimumMasterSet(t *testing.T) { + assertCandidateSetMatchesMinSizes(t, GetCICandidateMasterVMSizes(), minMasterVMSizes) +} + +func TestGetCICandidateWorkerVMSizesMatchMinimumWorkerSet(t *testing.T) { + assertCandidateSetMatchesMinSizes(t, GetCICandidateWorkerVMSizes(), minWorkerVMSizes) +} + +func assertCandidateSetMatchesMinSizes(t *testing.T, candidates []VMSize, expected map[VMSize]VMSizeStruct) { + t.Helper() + + if len(candidates) != len(expected) { + t.Fatalf("got %d candidates, want %d", len(candidates), len(expected)) + } + + seen := map[VMSize]bool{} + lastCoreCount := -1 + + for _, candidate := range candidates { + sizeInfo, ok := expected[candidate] + if !ok { + t.Fatalf("candidate %q is not in expected minimum size set", candidate) + } + if seen[candidate] { + t.Fatalf("candidate %q appears more than once", candidate) + } + seen[candidate] = true + + if sizeInfo.CoreCount < lastCoreCount { + t.Fatalf("candidate core counts are not non-decreasing: %d came after %d", sizeInfo.CoreCount, lastCoreCount) + } + lastCoreCount = sizeInfo.CoreCount + } + + for candidate := range expected { + if !seen[candidate] { + t.Fatalf("expected candidate %q was not returned", candidate) + } + } +} diff --git a/pkg/api/v20230904/openshiftcluster_validatestatic_test.go b/pkg/api/v20230904/openshiftcluster_validatestatic_test.go index d76f3b04770..ec9d4eebee7 100644 --- 
a/pkg/api/v20230904/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20230904/openshiftcluster_validatestatic_test.go @@ -767,6 +767,21 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = desID }, }, + { + name: "ci-only master vmSize valid in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.MasterProfile.VMSize = "Standard_D4s_v3" + }, + isCI: true, + }, + { + name: "master vmSize too small in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" + }, + isCI: true, + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", + }, } runTests(t, testModeCreate, createTests) @@ -866,7 +881,26 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { }, } + createTests := []*validateTest{ + { + name: "ci-only worker vmSize valid in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" + }, + isCI: true, + }, + { + name: "ci-only worker vmSize still respects minimum version", + modify: func(oc *OpenShiftCluster) { + oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v6" + }, + isCI: true, + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v6' is invalid.", + }, + } + // We do not perform this validation on update + runTests(t, testModeCreate, createTests) runTests(t, testModeCreate, tests) } diff --git a/pkg/api/v20231122/openshiftcluster_validatestatic_test.go b/pkg/api/v20231122/openshiftcluster_validatestatic_test.go index a5d2a2a9fe6..24a85ab272c 100644 --- a/pkg/api/v20231122/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20231122/openshiftcluster_validatestatic_test.go @@ -898,6 +898,21 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = desID 
}, }, + { + name: "ci-only master vmSize valid in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.MasterProfile.VMSize = "Standard_D4s_v3" + }, + isCI: true, + }, + { + name: "master vmSize too small in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" + }, + isCI: true, + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", + }, } runTests(t, testModeCreate, createTests) @@ -997,7 +1012,26 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { }, } + createTests := []*validateTest{ + { + name: "ci-only worker vmSize valid in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" + }, + isCI: true, + }, + { + name: "ci-only worker vmSize still respects minimum version", + modify: func(oc *OpenShiftCluster) { + oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v6" + }, + isCI: true, + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v6' is invalid.", + }, + } + // We do not perform this validation on update + runTests(t, testModeCreate, createTests) runTests(t, testModeCreate, tests) } diff --git a/pkg/api/v20240812preview/openshiftcluster_validatestatic_test.go b/pkg/api/v20240812preview/openshiftcluster_validatestatic_test.go index 3ac0fed2c3b..679877a6d26 100644 --- a/pkg/api/v20240812preview/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20240812preview/openshiftcluster_validatestatic_test.go @@ -905,6 +905,21 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = desID }, }, + { + name: "ci-only master vmSize valid in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.MasterProfile.VMSize = "Standard_D4s_v3" + }, + isCI: true, + }, + { + name: "master vmSize too small in ci", + modify: func(oc 
*OpenShiftCluster) { + oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" + }, + isCI: true, + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid.", + }, } runTests(t, testModeCreate, createTests) @@ -1004,7 +1019,26 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { }, } + createTests := []*validateTest{ + { + name: "ci-only worker vmSize valid in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" + }, + isCI: true, + }, + { + name: "ci-only worker vmSize still respects minimum version", + modify: func(oc *OpenShiftCluster) { + oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v6" + }, + isCI: true, + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v6' is invalid.", + }, + } + // We do not perform this validation on update + runTests(t, testModeCreate, createTests) runTests(t, testModeCreate, tests) } diff --git a/pkg/api/v20250725/openshiftcluster_validatestatic_test.go b/pkg/api/v20250725/openshiftcluster_validatestatic_test.go index 8a6fbe5ab4d..7852392d112 100644 --- a/pkg/api/v20250725/openshiftcluster_validatestatic_test.go +++ b/pkg/api/v20250725/openshiftcluster_validatestatic_test.go @@ -905,6 +905,21 @@ func TestOpenShiftClusterStaticValidateMasterProfile(t *testing.T) { oc.Properties.WorkerProfiles[0].DiskEncryptionSetID = desID }, }, + { + name: "ci-only master vmSize valid in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.MasterProfile.VMSize = "Standard_D4s_v3" + }, + isCI: true, + }, + { + name: "master vmSize too small in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.MasterProfile.VMSize = "Standard_D2s_v3" + }, + isCI: true, + wantErr: "400: InvalidParameter: properties.masterProfile.vmSize: The provided master VM size 'Standard_D2s_v3' is invalid for version '4.10.0'.", + }, } runTests(t, 
testModeCreate, createTests) @@ -1004,7 +1019,26 @@ func TestOpenShiftClusterStaticValidateWorkerProfile(t *testing.T) { }, } + createTests := []*validateTest{ + { + name: "ci-only worker vmSize valid in ci", + modify: func(oc *OpenShiftCluster) { + oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v3" + }, + isCI: true, + }, + { + name: "ci-only worker vmSize still respects minimum version", + modify: func(oc *OpenShiftCluster) { + oc.Properties.WorkerProfiles[0].VMSize = "Standard_D2s_v6" + }, + isCI: true, + wantErr: "400: InvalidParameter: properties.workerProfiles['worker'].vmSize: The provided worker VM size 'Standard_D2s_v6' is invalid.", + }, + } + // We do not perform this validation on update + runTests(t, testModeCreate, createTests) runTests(t, testModeCreate, tests) } diff --git a/pkg/util/cluster/cluster_config_test.go b/pkg/util/cluster/cluster_config_test.go new file mode 100644 index 00000000000..827c546b37f --- /dev/null +++ b/pkg/util/cluster/cluster_config_test.go @@ -0,0 +1,107 @@ +package cluster_test + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. 
+ +import ( + "reflect" + "testing" + + "github.com/spf13/viper" + + "github.com/Azure/ARO-RP/pkg/api/util/vms" + "github.com/Azure/ARO-RP/pkg/util/cluster" +) + +func TestNewClusterConfigFromEnvDefaultsCICandidateVMSizes(t *testing.T) { + viper.Reset() + t.Cleanup(viper.Reset) + + t.Setenv("CLUSTER", "test-cluster") + t.Setenv("RESOURCEGROUP", "rp-rg") + t.Setenv("AZURE_FP_SERVICE_PRINCIPAL_ID", "fp-sp") + t.Setenv("CI", "true") + + conf, err := cluster.NewClusterConfigFromEnv() + if err != nil { + t.Fatal(err) + } + + if conf.VnetResourceGroup != conf.ClusterName { + t.Fatalf("VnetResourceGroup=%q, want cluster name %q", conf.VnetResourceGroup, conf.ClusterName) + } + + if !sameVMSizeSet(conf.CandidateMasterVMSizes, vms.GetCICandidateMasterVMSizes()) { + t.Fatalf("unexpected master candidates: %v", conf.CandidateMasterVMSizes) + } + + if !sameVMSizeSet(conf.CandidateWorkerVMSizes, vms.GetCICandidateWorkerVMSizes()) { + t.Fatalf("unexpected worker candidates: %v", conf.CandidateWorkerVMSizes) + } +} + +func TestNewClusterConfigFromEnvUsesExplicitVMSizesAsCandidates(t *testing.T) { + viper.Reset() + t.Cleanup(viper.Reset) + + t.Setenv("CLUSTER", "test-cluster") + t.Setenv("RESOURCEGROUP", "rp-rg") + t.Setenv("AZURE_FP_SERVICE_PRINCIPAL_ID", "fp-sp") + t.Setenv("CI", "true") + t.Setenv("MASTER_VM_SIZE", string(vms.VMSizeStandardD8sV4)) + t.Setenv("WORKER_VM_SIZE", string(vms.VMSizeStandardD4sV4)) + + conf, err := cluster.NewClusterConfigFromEnv() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(conf.CandidateMasterVMSizes, []vms.VMSize{vms.VMSizeStandardD8sV4}) { + t.Fatalf("CandidateMasterVMSizes=%v, want [%s]", conf.CandidateMasterVMSizes, vms.VMSizeStandardD8sV4) + } + + if !reflect.DeepEqual(conf.CandidateWorkerVMSizes, []vms.VMSize{vms.VMSizeStandardD4sV4}) { + t.Fatalf("CandidateWorkerVMSizes=%v, want [%s]", conf.CandidateWorkerVMSizes, vms.VMSizeStandardD4sV4) + } +} + +func TestNewClusterConfigFromEnvRequiresWorkloadIdentityRoleSets(t 
*testing.T) { + viper.Reset() + t.Cleanup(viper.Reset) + + t.Setenv("CLUSTER", "test-cluster") + t.Setenv("RESOURCEGROUP", "rp-rg") + t.Setenv("AZURE_FP_SERVICE_PRINCIPAL_ID", "fp-sp") + t.Setenv("USE_WI", "true") + + _, err := cluster.NewClusterConfigFromEnv() + if err == nil || err.Error() != "workload Identity Role Set must be set" { + t.Fatalf("got err %v, want workload identity role set validation", err) + } +} + +func sameVMSizeSet(a, b []vms.VMSize) bool { + if len(a) != len(b) { + return false + } + + counts := map[vms.VMSize]int{} + for _, size := range a { + counts[size]++ + } + + for _, size := range b { + counts[size]-- + if counts[size] < 0 { + return false + } + } + + for _, count := range counts { + if count != 0 { + return false + } + } + + return true +} diff --git a/pkg/util/clusterdata/worker_profile_test.go b/pkg/util/clusterdata/worker_profile_test.go index 64fecabc324..b5bb9d379c6 100644 --- a/pkg/util/clusterdata/worker_profile_test.go +++ b/pkg/util/clusterdata/worker_profile_test.go @@ -30,10 +30,11 @@ import ( ) const ( - mockSubscriptionID = "00000000-0000-0000-0000-000000000000" - mockVnetRG = "fake-vnet-rg" - mockVnetName = "fake-vnet" - mockSubnetName = "cluster-worker" + mockSubscriptionID = "00000000-0000-0000-0000-000000000000" + mockVnetRG = "fake-vnet-rg" + mockVnetName = "fake-vnet" + mockSubnetName = "cluster-worker" + mockDiskEncryptionSetID = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/fake-vnet-rg/providers/Microsoft.Compute/diskEncryptionSets/test-des" ) func TestWorkerProfilesEnricherTask(t *testing.T) { @@ -79,6 +80,47 @@ func TestWorkerProfilesEnricherTask(t *testing.T) { wantOc: getWantOc(clusterID, validWorkerProfile()), givenOc: getGivenOc(clusterID), }, + { + name: "machine set objects exist - encryption at host and disk encryption set", + client: machinefake.NewSimpleClientset(createMachineSet("fake-worker-profile-1", validProvSpecWithSecurityAndDES())), + wantOc: getWantOc(clusterID, 
[]api.WorkerProfile{ + { + Name: "fake-worker-profile-1", + VMSize: vms.VMSizeStandardD4sV3, + DiskSizeGB: 512, + EncryptionAtHost: api.EncryptionAtHostEnabled, + DiskEncryptionSetID: mockDiskEncryptionSetID, + SubnetID: fmt.Sprintf( + "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s", + mockSubscriptionID, mockVnetRG, mockVnetName, mockSubnetName, + ), + Count: 1, + }, + }), + givenOc: getGivenOc(clusterID), + }, + { + name: "machine set objects exist - nil replicas default to one", + client: machinefake.NewSimpleClientset(func() *machinev1beta1.MachineSet { + ms := createMachineSet("fake-worker-profile-1", validProvSpec()) + ms.Spec.Replicas = nil + return ms + }()), + wantOc: getWantOc(clusterID, []api.WorkerProfile{ + { + Name: "fake-worker-profile-1", + VMSize: vms.VMSizeStandardD4sV3, + DiskSizeGB: 512, + EncryptionAtHost: api.EncryptionAtHostDisabled, + SubnetID: fmt.Sprintf( + "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s", + mockSubscriptionID, mockVnetRG, mockVnetName, mockSubnetName, + ), + Count: 1, + }, + }), + givenOc: getGivenOc(clusterID), + }, { name: "machine set objects exist - invalid provider spec JSON", client: machinefake.NewSimpleClientset(createMachineSet("fake-worker-profile-1", invalidProvSpec)), @@ -155,6 +197,36 @@ func TestWorkerProfilesEnricherTask(t *testing.T) { } } +func TestSafeUnmarshalProviderSpec(t *testing.T) { + t.Run("coerces zone value to string", func(t *testing.T) { + spec, err := safeUnmarshalProviderSpec(invalidProvSpecZoneAsInt().Value.Raw) + if err != nil { + t.Fatal(err) + } + + if spec.Zone == nil || *spec.Zone != "1" { + t.Fatalf("zone=%v, want %q", spec.Zone, "1") + } + }) + + t.Run("coerces tag values to strings", func(t *testing.T) { + spec, err := safeUnmarshalProviderSpec(invalidProvSpecTagsAsInt().Value.Raw) + if err != nil { + t.Fatal(err) + } + if spec.Tags["field2"] != "2" { + t.Fatalf("tag field2=%q, want %q", 
spec.Tags["field2"], "2") + } + }) + + t.Run("malformed json returns error", func(t *testing.T) { + _, err := safeUnmarshalProviderSpec([]byte("{invalid")) + if err == nil { + t.Fatal("expected error for malformed provider spec") + } + }) +} + // This function creates a new MachineSet object with the given name and ProviderSpec. func createMachineSet(name string, ProvSpec machinev1beta1.ProviderSpec) *machinev1beta1.MachineSet { return &machinev1beta1.MachineSet{ @@ -205,6 +277,39 @@ func validProvSpec() machinev1beta1.ProviderSpec { } } +func validProvSpecWithSecurityAndDES() machinev1beta1.ProviderSpec { + return machinev1beta1.ProviderSpec{ + Value: &kruntime.RawExtension{ + Raw: []byte(fmt.Sprintf(`{ + "apiVersion": "machine.openshift.io/v1beta1", + "kind": "AzureMachineProviderSpec", + "tags": { + "field1": "value1", + "field2": "value2" + }, + "osDisk": { + "diskSizeGB": 512, + "managedDisk": { + "diskEncryptionSet": { + "id": "%s" + } + } + }, + "securityProfile": { + "encryptionAtHost": true + }, + "vmSize": "Standard_D4s_v3", + "networkResourceGroup": "%s", + "vnet": "%s", + "subnet": "%s", + "zone": "1" +}`, + mockDiskEncryptionSetID, mockVnetRG, mockVnetName, mockSubnetName, + )), + }, + } +} + func invalidProvSpecZoneAsInt() machinev1beta1.ProviderSpec { return machinev1beta1.ProviderSpec{ Value: &kruntime.RawExtension{ From 032a833a1cf208f706d8c8dd529335381275130d Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Thu, 16 Apr 2026 18:16:49 +0200 Subject: [PATCH 06/12] [ARO-24603] Reject malformed versions in VM size validation Restore the previous fail-closed behavior so malformed or empty cluster versions cannot bypass VM-size validation for otherwise-supported SKUs. 
Made-with: Cursor --- pkg/api/validate/vm.go | 9 +++++---- pkg/api/validate/vm_test.go | 16 ++++++++++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/pkg/api/validate/vm.go b/pkg/api/validate/vm.go index 9bdcfc32f2c..bf7db887a70 100644 --- a/pkg/api/validate/vm.go +++ b/pkg/api/validate/vm.go @@ -42,6 +42,11 @@ func VMSizeIsValidForVersion(vmSize vms.VMSize, isMaster bool, v string, isCI bo return false } + clusterVersion, err := version.ParseVersion(v) + if err != nil { + return false + } + role := vms.VMRoleWorker if isMaster { role = vms.VMRoleMaster @@ -52,10 +57,6 @@ func VMSizeIsValidForVersion(vmSize vms.VMSize, isMaster bool, v string, isCI bo // If the VM size has a minimum version requirement, check it if sizeInfo.MinimumVersion != nil { - clusterVersion, err := version.ParseVersion(v) - if err != nil { - return false - } return clusterVersion.Gt(sizeInfo.MinimumVersion) || clusterVersion.Eq(sizeInfo.MinimumVersion) } diff --git a/pkg/api/validate/vm_test.go b/pkg/api/validate/vm_test.go index bbcb762706a..f0dcc8f233e 100644 --- a/pkg/api/validate/vm_test.go +++ b/pkg/api/validate/vm_test.go @@ -533,6 +533,22 @@ func TestVMSizeIsValidForVersion(t *testing.T) { isCI: false, desiredResult: false, }, + { + name: "Standard_D8s_v5 with invalid version is rejected", + vmSize: vms.VMSizeStandardD8sV5, + isMaster: true, + version: "invalid.version", + isCI: false, + desiredResult: false, + }, + { + name: "Standard_F72s_v2 with empty version is rejected", + vmSize: vms.VMSizeStandardF72sV2, + isMaster: false, + version: "", + isCI: false, + desiredResult: false, + }, // Test existing VM sizes still work with version validation { name: "Standard_D8s_v5 is valid for any version as master", From 54ca80512895c003b1bfbcccb90409e048654df3 Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Thu, 16 Apr 2026 18:17:56 +0200 Subject: [PATCH 07/12] [ARO-24603] Skip SP cleanup for workload identity deletes Restore the workload-identity early return 
so delete flows do not dereference service-principal clients for clusters that no longer use that cleanup path. Made-with: Cursor --- pkg/util/cluster/cluster.go | 1 + .../cluster/delete_roleassignments_test.go | 28 +++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 pkg/util/cluster/delete_roleassignments_test.go diff --git a/pkg/util/cluster/cluster.go b/pkg/util/cluster/cluster.go index 08a207e230d..24d34672e12 100644 --- a/pkg/util/cluster/cluster.go +++ b/pkg/util/cluster/cluster.go @@ -1422,6 +1422,7 @@ func (c *Cluster) fixupNSGs(ctx context.Context, vnetResourceGroup, clusterName func (c *Cluster) deleteRoleAssignments(ctx context.Context, vnetResourceGroup, clusterName string) error { if c.Config.UseWorkloadIdentity { c.log.Print("Skipping deletion of service principal role assignments") + return nil } c.log.Print("deleting role assignments") oc, err := c.openshiftclusters.Get(ctx, vnetResourceGroup, clusterName) diff --git a/pkg/util/cluster/delete_roleassignments_test.go b/pkg/util/cluster/delete_roleassignments_test.go new file mode 100644 index 00000000000..7d14d73a6ce --- /dev/null +++ b/pkg/util/cluster/delete_roleassignments_test.go @@ -0,0 +1,28 @@ +package cluster + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. 
+ +import ( + "context" + "testing" + + "github.com/sirupsen/logrus" +) + +func TestDeleteRoleAssignmentsSkipsServicePrincipalCleanupForWorkloadIdentity(t *testing.T) { + c := &Cluster{ + log: logrus.NewEntry(logrus.New()), + Config: &ClusterConfig{UseWorkloadIdentity: true}, + } + + defer func() { + if recovered := recover(); recovered != nil { + t.Fatalf("deleteRoleAssignments panicked instead of short-circuiting: %v", recovered) + } + }() + + if err := c.deleteRoleAssignments(context.Background(), "test-rg", "test-cluster"); err != nil { + t.Fatalf("deleteRoleAssignments() error = %v, want nil", err) + } +} From 5fcf10cbd342ea97e966dc185ce93a0c757f1130 Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Thu, 16 Apr 2026 18:23:34 +0200 Subject: [PATCH 08/12] [ARO-24603] Make admin VM size checks CI-aware Use the testing VM-size tables for admin validation and discovery in CI so resize and preflight endpoints stay aligned with the create/update paths on this rebased stack. Made-with: Cursor --- ...in_openshiftcluster_resize_controlplane.go | 2 +- .../admin_openshiftcluster_vmresize.go | 2 +- ...penshiftcluster_vmresize_pre_validation.go | 2 +- .../admin_supportedvmsizes_list_test.go | 21 +++++++++++++++++++ pkg/frontend/admin_supportvmsizes_list.go | 3 ++- pkg/frontend/shared_test.go | 1 + pkg/frontend/validate.go | 12 +++++++++-- pkg/frontend/validate_test.go | 9 +++++++- 8 files changed, 45 insertions(+), 7 deletions(-) diff --git a/pkg/frontend/admin_openshiftcluster_resize_controlplane.go b/pkg/frontend/admin_openshiftcluster_resize_controlplane.go index 1b1464012c5..223e33e9a06 100644 --- a/pkg/frontend/admin_openshiftcluster_resize_controlplane.go +++ b/pkg/frontend/admin_openshiftcluster_resize_controlplane.go @@ -71,7 +71,7 @@ func (f *frontend) _postAdminResizeControlPlane(log *logrus.Entry, ctx context.C } } - if err := validateAdminMasterVMSize(vmSize); err != nil { + if err := validateAdminMasterVMSize(vmSize, f.env.IsCI()); err != nil { return 
err } diff --git a/pkg/frontend/admin_openshiftcluster_vmresize.go b/pkg/frontend/admin_openshiftcluster_vmresize.go index 19cd68cd17a..536f7a1fa01 100644 --- a/pkg/frontend/admin_openshiftcluster_vmresize.go +++ b/pkg/frontend/admin_openshiftcluster_vmresize.go @@ -37,7 +37,7 @@ func (f *frontend) _postAdminOpenShiftClusterVMResize(log *logrus.Entry, ctx con return err } - err = validateAdminMasterVMSize(vmSize) + err = validateAdminMasterVMSize(vmSize, f.env.IsCI()) if err != nil { return err } diff --git a/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation.go b/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation.go index e1d0ffe6533..452a8f6b830 100644 --- a/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation.go +++ b/pkg/frontend/admin_openshiftcluster_vmresize_pre_validation.go @@ -433,7 +433,7 @@ func (f *frontend) validateVMSKU( return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "vmSize", "The provided vmSize is empty.") } - err := validateAdminMasterVMSize(desiredVMSize) + err := validateAdminMasterVMSize(desiredVMSize, f.env.IsCI()) if err != nil { return err } diff --git a/pkg/frontend/admin_supportedvmsizes_list_test.go b/pkg/frontend/admin_supportedvmsizes_list_test.go index 39d07553ab7..179f93b9e45 100644 --- a/pkg/frontend/admin_supportedvmsizes_list_test.go +++ b/pkg/frontend/admin_supportedvmsizes_list_test.go @@ -8,8 +8,10 @@ import ( "testing" "github.com/go-test/deep" + "go.uber.org/mock/gomock" "github.com/Azure/ARO-RP/pkg/api/util/vms" + mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env" utilerror "github.com/Azure/ARO-RP/test/util/error" ) @@ -67,3 +69,22 @@ func TestSupportedvmsizes(t *testing.T) { }) } } + +func TestSupportedvmsizesIncludesTestingSizesInCI(t *testing.T) { + controller := gomock.NewController(t) + mockEnv := mock_env.NewMockInterface(controller) + mockEnv.EXPECT().IsCI().Return(true) + + f := &frontend{env: mockEnv} + gotResponse, err := 
f.supportedVMSizesForRole(vms.VMRoleMaster) + utilerror.AssertErrorMessage(t, err, "") + + got := map[vms.VMSize]vms.VMSizeStruct{} + if err := json.Unmarshal(gotResponse, &got); err != nil { + t.Fatal(err) + } + + if _, ok := got[vms.VMSizeStandardD4sV3]; !ok { + t.Fatalf("expected CI master list to include %q, got %v", vms.VMSizeStandardD4sV3, got) + } +} diff --git a/pkg/frontend/admin_supportvmsizes_list.go b/pkg/frontend/admin_supportvmsizes_list.go index 0a82a452184..2c59e1303e5 100644 --- a/pkg/frontend/admin_supportvmsizes_list.go +++ b/pkg/frontend/admin_supportvmsizes_list.go @@ -27,7 +27,8 @@ func (f *frontend) supportedVMSizesForRole(vmRole vms.VMRole) ([]byte, error) { if vmRole != vms.VMRoleMaster && vmRole != vms.VMRoleWorker { return nil, api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "", fmt.Sprintf("The provided vmRole '%s' is invalid. vmRole can only be master or worker", vmRole)) } - vmsizes := vms.SupportedVMSizesByRole[vmRole] + isCI := f.env != nil && f.env.IsCI() + vmsizes := adminSupportedVMSizesByRole(isCI)[vmRole] b, err := json.MarshalIndent(vmsizes, "", " ") if err != nil { return b, err diff --git a/pkg/frontend/shared_test.go b/pkg/frontend/shared_test.go index f249b69ad2e..d1bcce2d77d 100644 --- a/pkg/frontend/shared_test.go +++ b/pkg/frontend/shared_test.go @@ -119,6 +119,7 @@ func newTestInfraWithFeatures(t *testing.T, features map[env.Feature]bool) *test _env := mock_env.NewMockInterface(controller) _env.EXPECT().IsLocalDevelopmentMode().AnyTimes().Return(false) + _env.EXPECT().IsCI().AnyTimes().Return(false) _env.EXPECT().Environment().AnyTimes().Return(&azureclient.PublicCloud) _env.EXPECT().Hostname().AnyTimes().Return("testhost") _env.EXPECT().Location().AnyTimes().Return("eastus") diff --git a/pkg/frontend/validate.go b/pkg/frontend/validate.go index bef23b444a5..5c36e8b68f1 100644 --- a/pkg/frontend/validate.go +++ b/pkg/frontend/validate.go @@ -217,9 +217,9 @@ func 
validateNetworkInterfaceName(nicName string) error { return nil } -func validateAdminMasterVMSize(vmSize string) error { +func validateAdminMasterVMSize(vmSize string, isCI bool) error { // check to ensure that the target size is supported as a master size - for k := range vms.SupportedVMSizesByRole[vms.VMRoleMaster] { + for k := range adminSupportedVMSizesByRole(isCI)[vms.VMRoleMaster] { if strings.EqualFold(string(k), vmSize) { return nil } @@ -228,6 +228,14 @@ func validateAdminMasterVMSize(vmSize string) error { return api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidParameter, "", fmt.Sprintf("The provided vmSize '%s' is unsupported for master.", vmSize)) } +func adminSupportedVMSizesByRole(isCI bool) map[vms.VMRole]map[vms.VMSize]vms.VMSizeStruct { + if isCI { + return vms.SupportedVMSizesByRoleForTesting + } + + return vms.SupportedVMSizesByRole +} + // validateInstallVersion validates the install version set in the clusterprofile.version // TODO convert this into static validation instead of this receiver function in the validation for frontend. 
func (f *frontend) validateInstallVersion(ctx context.Context, oc *api.OpenShiftCluster) error { diff --git a/pkg/frontend/validate_test.go b/pkg/frontend/validate_test.go index 2350658587b..4cbade3d5d9 100644 --- a/pkg/frontend/validate_test.go +++ b/pkg/frontend/validate_test.go @@ -237,6 +237,7 @@ func TestValidateAdminMasterVMSize(t *testing.T) { for _, tt := range []struct { test string vmSize string + isCI bool wantErr string }{ { @@ -259,9 +260,15 @@ func TestValidateAdminMasterVMSize(t *testing.T) { vmSize: "silly_d8s_v10", wantErr: "400: InvalidParameter: : The provided vmSize 'silly_d8s_v10' is unsupported for master.", }, + { + test: "ci-only master size is supported in ci", + vmSize: "Standard_D4s_v3", + isCI: true, + wantErr: "", + }, } { t.Run(tt.test, func(t *testing.T) { - err := validateAdminMasterVMSize(tt.vmSize) + err := validateAdminMasterVMSize(tt.vmSize, tt.isCI) if err != nil && err.Error() != tt.wantErr || err == nil && tt.wantErr != "" { t.Error(err) From dc6e1206ddc89e76fe1f5d92b19fcd260c3845a6 Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Fri, 17 Apr 2026 09:30:28 +0200 Subject: [PATCH 09/12] [ARO-24603] Fix copyright header placement in vms/types.go Move the copyright/license header before imports to match repo convention. Co-Authored-By: Claude Opus 4.7 (1M context) --- pkg/api/util/vms/types.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/api/util/vms/types.go b/pkg/api/util/vms/types.go index ade5c22a20a..9967c6eaab7 100644 --- a/pkg/api/util/vms/types.go +++ b/pkg/api/util/vms/types.go @@ -1,14 +1,14 @@ package vms +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. + import ( "encoding/json" "github.com/Azure/ARO-RP/pkg/api/util/version" ) -// Copyright (c) Microsoft Corporation. -// Licensed under the Apache License 2.0. 
- // VMRole represents a VM role = [master, worker] type VMRole string From 4dc239f83c1590e3d6aa5d17d0805edd97da06db Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Fri, 17 Apr 2026 09:31:52 +0200 Subject: [PATCH 10/12] [ARO-24603] Remove stale TODO and fix log message typo in cluster.go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove outdated MAITIU TODO (fields are actively used) and fix "roleassignments" → "role assignments" spacing in log message. Co-Authored-By: Claude Opus 4.7 (1M context) --- pkg/util/cluster/cluster.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/util/cluster/cluster.go b/pkg/util/cluster/cluster.go index 24d34672e12..12a1324dbf8 100644 --- a/pkg/util/cluster/cluster.go +++ b/pkg/util/cluster/cluster.go @@ -75,9 +75,8 @@ type ClusterConfig struct { NoInternet bool `mapstructure:"NO_INTERNET"` MockMSIObjectID string `mapstructure:"MOCK_MSI_OBJECT_ID"` - MasterVMSize vms.VMSize `mapstructure:"MASTER_VM_SIZE"` - WorkerVMSize vms.VMSize `mapstructure:"WORKER_VM_SIZE"` - // TODO: MAITIU - Do we need to touch this? 
+ MasterVMSize vms.VMSize `mapstructure:"MASTER_VM_SIZE"` + WorkerVMSize vms.VMSize `mapstructure:"WORKER_VM_SIZE"` CandidateMasterVMSizes []vms.VMSize `mapstructure:"MASTER_VM_SIZES"` CandidateWorkerVMSizes []vms.VMSize `mapstructure:"WORKER_VM_SIZES"` } @@ -1461,7 +1460,7 @@ func (c *Cluster) deleteRoleAssignments(ctx context.Context, vnetResourceGroup, func (c *Cluster) deleteWimiRoleAssignments(ctx context.Context, vnetResourceGroup string) error { if !c.Config.UseWorkloadIdentity { - c.log.Print("Skipping deletion of wimi roleassignments") + c.log.Print("Skipping deletion of wimi role assignments") return nil } c.log.Print("deleting wimi role assignments") From ea55a0e2b7352e2f4c1320ea7623b3b309533718 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 17 Apr 2026 07:58:24 +0000 Subject: [PATCH 11/12] [ARO-24603] Add OS_CLUSTER_VERSION to env.example for developer discoverability Agent-Logs-Url: https://github.com/Azure/ARO-RP/sessions/480f5a36-6a5e-43f9-b25b-b96f4ed2bd6f Co-authored-by: tuxerrante <8364469+tuxerrante@users.noreply.github.com> --- env.example | 3 +++ 1 file changed, 3 insertions(+) diff --git a/env.example b/env.example index f39099757dc..8072e5061bb 100644 --- a/env.example +++ b/env.example @@ -6,6 +6,9 @@ export AZURE_EXTENSION_DEV_SOURCES="$(pwd)/python" export CLUSTER_RESOURCEGROUP="${AZURE_PREFIX}-v4-$LOCATION" export CLUSTER_NAME="${AZURE_PREFIX}-aro-cluster" +# OS_CLUSTER_VERSION overrides the default OpenShift install version used by hack/cluster create. +# If unset, the default install stream version is used automatically. +# export OS_CLUSTER_VERSION="4.14.1" # CLUSTER is read by hack/cluster (Go) via mapstructure; CLUSTER_NAME is used # by hack/setup_resources.sh and az aro CLI commands. 
export CLUSTER="${CLUSTER_NAME}" From 72a90755b7da82076a4e6a86d58bcd0be3877c59 Mon Sep 17 00:00:00 2001 From: Alessandro Affinito Date: Thu, 7 May 2026 17:38:51 +0200 Subject: [PATCH 12/12] test(api): pin rebased vm size carry-forwards Record the formatter-required spacing changes and add regression coverage for the D48ds_v5 and E96ds_v5 entries that were forward-ported during the rebase. Co-authored-by: Cursor --- pkg/api/admin/openshiftcluster.go | 1 + pkg/api/openshiftcluster.go | 1 + pkg/api/util/vms/sizes_test.go | 12 ++++++++++++ pkg/api/validate/vm_test.go | 14 ++++++++++++++ 4 files changed, 28 insertions(+) diff --git a/pkg/api/admin/openshiftcluster.go b/pkg/api/admin/openshiftcluster.go index 5cbf6625565..345306433f8 100644 --- a/pkg/api/admin/openshiftcluster.go +++ b/pkg/api/admin/openshiftcluster.go @@ -274,6 +274,7 @@ type MasterProfile struct { EncryptionAtHost EncryptionAtHost `json:"encryptionAtHost,omitempty"` DiskEncryptionSetID string `json:"diskEncryptionSetId,omitempty"` } + // WorkerProfile represents a worker profile. 
type WorkerProfile struct { Name string `json:"name,omitempty"` diff --git a/pkg/api/openshiftcluster.go b/pkg/api/openshiftcluster.go index 1b6665850cc..680cfdddad9 100644 --- a/pkg/api/openshiftcluster.go +++ b/pkg/api/openshiftcluster.go @@ -448,6 +448,7 @@ type MasterProfile struct { EncryptionAtHost EncryptionAtHost `json:"encryptionAtHost,omitempty"` DiskEncryptionSetID string `json:"diskEncryptionSetId,omitempty"` } + // WorkerProfile represents a worker profile type WorkerProfile struct { MissingFields diff --git a/pkg/api/util/vms/sizes_test.go b/pkg/api/util/vms/sizes_test.go index 75f3665e8d5..a5eb65120b5 100644 --- a/pkg/api/util/vms/sizes_test.go +++ b/pkg/api/util/vms/sizes_test.go @@ -37,6 +37,18 @@ func TestLookupVMSizeFindsKnownAndUnknownSizes(t *testing.T) { wantFound: true, wantCoreCount: 4, }, + { + name: "finds rebased worker DDSv5 carry-forward", + vmSize: VMSizeStandardD48dsV5, + wantFound: true, + wantCoreCount: 48, + }, + { + name: "finds rebased worker EDSv5 carry-forward", + vmSize: VMSizeStandardE96dsV5, + wantFound: true, + wantCoreCount: 96, + }, { name: "finds CI-only worker size with minimum version", vmSize: VMSizeStandardD2sV6, diff --git a/pkg/api/validate/vm_test.go b/pkg/api/validate/vm_test.go index f0dcc8f233e..aa67f85aa12 100644 --- a/pkg/api/validate/vm_test.go +++ b/pkg/api/validate/vm_test.go @@ -81,6 +81,20 @@ func TestVMSizeIsValid(t *testing.T) { isCI: false, desiredResult: true, }, + { + name: "Standard_D48ds_v5 is supported for use in ARO as worker node", + vmSize: vms.VMSizeStandardD48dsV5, + isMaster: false, + isCI: false, + desiredResult: true, + }, + { + name: "Standard_E96ds_v5 is supported for use in ARO as worker node", + vmSize: vms.VMSizeStandardE96dsV5, + isMaster: false, + isCI: false, + desiredResult: true, + }, // CI mode tests { name: "CI mode: Standard_D2s_v3 is valid as worker",