diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
index 7fa81b56..c1981b51 100644
--- a/.terraform.lock.hcl
+++ b/.terraform.lock.hcl
@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
- version = "6.22.0"
+ version = "6.23.0"
constraints = ">= 4.56.0"
hashes = [
- "h1:6UWaO66FYIrJuNiED+m0TUzhKjplwv6nfEckapn8uTI=",
- "h1:H7FbgHKqL2LcCdOsGJSK7tvcXSmk/0uqqcB5L3EV5jE=",
- "h1:MjRcKixrdKM/fdB5gs/uYnnMkj8NWEefDvi7hC7R5N4=",
- "h1:NE+sZVxV1n139f+Bm8WvHwv6uSE9Tb3CGPIWu5rkGa0=",
- "h1:O04Ld1zBvEwEigGNb6MH+aV4TUowlaoNxj2M2Rds7p4=",
- "h1:TV1UZ7DzioV1EUY/lMS+eIInU379DA1Q2QwnEGGZMks=",
- "h1:aMlndTHwtnbRKOMU3Myd/67/lOHzNH0356WijW7IDjY=",
- "h1:cnKrUUf5zpmPbuyiZxmcC9vUgKqomogWzrtgc8dVSGQ=",
- "h1:emjs7gQ5cvgo+xH9jVGJSQuikm6b0PBSMgX3+cz2Eeo=",
- "h1:p6Yc8y3iUuIq242eElIHPHf4OITRZFsrqF3nibtND2o=",
- "h1:q1qvcoLjtEKbdWdVLvJ/qzJLhIh2SUc2CoacNwLRmE8=",
- "h1:sIe9XGP/B32MJjtJ7FIM5lx7Bp6goG0TQppbrYf91mI=",
- "h1:t+roJOBBhutQ0CiZIrLABSHIT9delVCujnkRgiYehxE=",
- "h1:ud10sbNbWu35Of9WqHaT1asGATMMgwFE/DLWPajG+Y8=",
- "zh:0ed7ceb13bade9076021a14f995d07346d3063f4a419a904d5804d76e372bbda",
- "zh:195dcde5a4b0def82bc3379053edc13941ff94ea5905808fe575f7c7bbd66693",
- "zh:4047c4dba121d29859b72d2155c47f969b41d3c5768f73dff5d8a0cc55f74e52",
- "zh:5694f37d6ea69b6f96dfb30d53e66f7a41c1aad214c212b6ffa54bdd799e3b27",
- "zh:6cf8bb7d984b1fae9fd10d6ce1e62f6c10751a1040734b75a1f7286609782e49",
- "zh:737d0e600dfe2626b4d6fc5dd2b24c0997fd983228a7a607b9176a1894a281a0",
- "zh:7d328a195ce36b1170afe6758cf88223c8765620211f5cc0451bdd6899243b4e",
- "zh:7edb4bc34baeba92889bd9ed50b34c04b3eeb3d8faa8bb72699c6335a2e95bab",
- "zh:8e71836814e95454b00c51f3cb3e10fd78a59f7dc4c5362af64233fee989790d",
- "zh:9367f63b23d9ddfab590b2247a8ff5ccf83410cbeca43c6e441c488c45efff4c",
+ "h1:7ajuVQ4unODSueDbHuq24Pv7MSnsdZnPqA29ZRirTzs=",
+ "h1:8pfQCbr4JFt9slLo6/LnRQbenI6BlPMVsDBm2MZLLuQ=",
+ "h1:GjNa/4CJOShckzZ4P8Z2Qphs8zFm3pxotKaSabMMHy4=",
+ "h1:LcAo25tYBBX5ubdZ1tu7kpH6jtO8GgcSRyyoouL7Oto=",
+ "h1:V2GnNrwvH+cSwCF45lYlc22kNOgEsmGaTHVO3twktMk=",
+ "h1:WMdBPDu4z8C3SkGowzfQ1nXkKZcsvpzvlwx1qBA1Ggk=",
+ "h1:YDRZxLbNCTn9DD1axdrRxUzboIZ5PZqjWupnxLN3wvM=",
+ "h1:ZEeZ8ImbH5NQF4O6erecp91uPcnTxVNzRTvLo60KAPg=",
+ "h1:a39B6utBFSZYMI4+454ABabT9T/XqGnyMbka8wWVgbU=",
+ "h1:bcsBqGyclIvX7fgmX4VQvePf0Q4QbDGdxV0HbCnx+pI=",
+ "h1:lYPCZk9QYoxbut9vR9HakLDweaCAguRdLs7HSrenYKQ=",
+ "h1:r5ArLmBzNbxB3d9E7rHnpvGfJmfunCR2CnfYKcmH3vk=",
+ "h1:rue7iU6J6MznSVz3EOAy/0Ko26Gh0oH1QYxqAxUmqWg=",
+ "h1:ulKASxaBykWqbDxshTQRIavSn1Ux9OWM3Yvzc1HvEns=",
+ "zh:254eab8da4534fe937cb48992e4843d4165a6455eb6e989ef6cae7ebeb6fc107",
+ "zh:3c3913e4853c491ae3b65560fd5911c21959b57f2d1f890c252a12d4ac201852",
+ "zh:5baf2900b4b5fd7be40b0ce219359a6ec9a8896f267b932a249cee6cf0eccf09",
+ "zh:6c0784a51dfc31390a4d6a93f61c91c307fe8625872227236ba4dac5a59d33f6",
+ "zh:6c6af8b96d14893edb4e23c877a406ed85b970546c3b318edb20da05e505cd43",
+ "zh:732724c69ec82582804052ebe1f01d470e4af9a6c42a2a5d17df5da3d7e6e5af",
+ "zh:869c3c71510d9079ca7147a1660c233cffc872840995c4a3745ae26a59431414",
+ "zh:99db418c8b2473366a944164b7ce25c8fbcfd8ba6d1146154d885ce898351be0",
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
- "zh:a007de80ffde8539a73ee39fcfbe7ed12e025c98cd29b2110a7383b41a4aad39",
- "zh:aae7b7aed8bf3a4bea80a9a2f08fef1adeb748beff236c4a54af93bb6c09a56c",
- "zh:b5a16b59d4210c1eaf35c8c027ecdab9e074dd081d602f5112eecdebf2e1866d",
- "zh:d479bad0a004e4893bf0ba6c6cd867fefd14000051bbe3de5b44a925e3d46cd5",
+ "zh:a54c2a3672d50a4c17ca04230af7a236b3d7f13418cc4b1982c2c4523744d5de",
+ "zh:acfd7014b33af2f36da63ebda72c500dc3e9bf1f2497b5423480bfce7db22fe8",
+ "zh:af0a81fcdf53064521ff9fb3d4e3e98098d5de04ed2938704807b2ef454456b5",
+ "zh:b042d5546151256a08f58eb7eaa1f65911b77be4ae2988c993d7ee94de1520a8",
+ "zh:cd033e0e52a2f13b59ab2e9cb84870306f3143d0793e0161a7ec12765e949a28",
+ "zh:d04a9e1861a08bdbe3a2be4c77680933cbbd8b87e6879d8079c3e5cd8d0f1ee7",
]
}
diff --git a/README.md b/README.md
index 13b62e23..17174c79 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,7 @@
+
🎥 Watch a demo | 📖 Docs | 🚀 Sign up | 🐦 Follow us
diff --git a/cmd/changes_submit_plan.go b/cmd/changes_submit_plan.go
index 6d00ff19..fbd3626f 100644
--- a/cmd/changes_submit_plan.go
+++ b/cmd/changes_submit_plan.go
@@ -158,6 +158,14 @@ func SubmitPlan(cmd *cobra.Command, args []string) error {
}
}
+ labels, err := parseLabelsArgument()
+ if err != nil {
+ return loggedError{
+ err: err,
+ fields: lf,
+ message: "Failed to parse labels",
+ }
+ }
properties := &sdp.ChangeProperties{
Title: title,
Description: viper.GetString("description"),
@@ -167,6 +175,7 @@ func SubmitPlan(cmd *cobra.Command, args []string) error {
CodeChanges: codeChangesOutput,
Repo: repoUrl,
EnrichedTags: enrichedTags,
+ Labels: labels,
}
if changeUUID == uuid.Nil {
diff --git a/cmd/flags.go b/cmd/flags.go
index 95cdf57f..c48d6465 100644
--- a/cmd/flags.go
+++ b/cmd/flags.go
@@ -2,6 +2,7 @@ package cmd
import (
"fmt"
+ "strconv"
"strings"
"github.com/overmindtech/cli/sdp-go"
@@ -30,6 +31,7 @@ func addChangeCreationFlags(cmd *cobra.Command) {
cmd.PersistentFlags().String("terraform-plan-output", "", "Filename of cached terraform plan output for this change.")
cmd.PersistentFlags().String("code-changes-diff", "", "Filename of the code diff of this change.")
cmd.PersistentFlags().StringSlice("tags", []string{}, "Tags to apply to this change, these should be specified in key=value format. Multiple tags can be specified by repeating the flag or using a comma separated list.")
+ cmd.PersistentFlags().StringSlice("labels", []string{}, "Labels to apply to this change, these should be specified in name=color format where color is a hex code (e.g., FF0000 or #FF0000). Multiple labels can be specified by repeating the flag or using a comma separated list.")
}
func parseTagsArgument() (*sdp.EnrichedTags, error) {
@@ -58,6 +60,45 @@ func parseTagsArgument() (*sdp.EnrichedTags, error) {
return enrichedTags, nil
}
+func parseLabelsArgument() ([]*sdp.Label, error) {
+ labels := make([]*sdp.Label, 0)
+ for _, label := range viper.GetStringSlice("labels") {
+ parts := strings.SplitN(label, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid label format: %s (expected name=color)", label)
+ }
+ if parts[0] == "" {
+ return nil, fmt.Errorf("invalid label format: %s (label name cannot be empty)", label)
+ }
+
+ // Normalise colour: strip leading # if present, validate, then add # back
+ colour := strings.TrimPrefix(parts[1], "#")
+ if colour == "" {
+ return nil, fmt.Errorf("invalid colour format: %s (colour cannot be empty)", parts[1])
+ }
+
+ // Validate it's exactly 6 hex digits
+ if len(colour) != 6 {
+ return nil, fmt.Errorf("invalid colour format: %s (must be 6 hex digits, got %d)", parts[1], len(colour))
+ }
+
+ // Validate all characters are valid hex digits
+ if _, err := strconv.ParseUint(colour, 16, 64); err != nil {
+ return nil, fmt.Errorf("invalid colour format: %s (must be valid hex digits)", parts[1])
+ }
+
+ // Normalise to canonical form: always #rrggbb
+ normalisedColour := "#" + strings.ToUpper(colour)
+
+ labels = append(labels, &sdp.Label{
+ Name: parts[0],
+ Colour: normalisedColour,
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ })
+ }
+ return labels, nil
+}
+
// Adds common flags to API commands e.g. timeout
func addAPIFlags(cmd *cobra.Command) {
cmd.PersistentFlags().String("timeout", "10m", "How long to wait for responses")
diff --git a/cmd/flags_test.go b/cmd/flags_test.go
new file mode 100644
index 00000000..14e7e14b
--- /dev/null
+++ b/cmd/flags_test.go
@@ -0,0 +1,182 @@
+package cmd
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/overmindtech/cli/sdp-go"
+ "github.com/spf13/viper"
+)
+
+func TestParseLabelsArgument(t *testing.T) {
+ tests := []struct {
+ name string
+ labels []string
+ want []*sdp.Label
+ errorContains string
+ }{
+ {
+ name: "empty labels",
+ labels: []string{},
+ want: []*sdp.Label{},
+ },
+ {
+ name: "single label with hash",
+ labels: []string{"label1=#FF0000"},
+ want: []*sdp.Label{
+ {
+ Name: "label1",
+ Colour: "#FF0000",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ },
+ },
+ {
+ name: "single label without hash",
+ labels: []string{"label1=ff0000"},
+ want: []*sdp.Label{
+ {
+ Name: "label1",
+ Colour: "#FF0000",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ },
+ },
+ {
+ name: "single label with lowercase hex",
+ labels: []string{"label1=abc123"},
+ want: []*sdp.Label{
+ {
+ Name: "label1",
+ Colour: "#ABC123",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ },
+ },
+ {
+ name: "multiple labels with hash",
+ labels: []string{"label1=#FF0000", "label2=#00FF00", "label3=#0000FF"},
+ want: []*sdp.Label{
+ {
+ Name: "label1",
+ Colour: "#FF0000",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ {
+ Name: "label2",
+ Colour: "#00FF00",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ {
+ Name: "label3",
+ Colour: "#0000FF",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ },
+ },
+ {
+ name: "multiple labels mixed hash and no hash",
+ labels: []string{"label1=#FF0000", "label2=00FF00", "label3=#0000FF"},
+ want: []*sdp.Label{
+ {
+ Name: "label1",
+ Colour: "#FF0000",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ {
+ Name: "label2",
+ Colour: "#00FF00",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ {
+ Name: "label3",
+ Colour: "#0000FF",
+ Type: sdp.LabelType_LABEL_TYPE_USER,
+ },
+ },
+ },
+ {
+ name: "missing equals sign",
+ labels: []string{"label1FF0000"},
+ errorContains: "invalid label format",
+ },
+ {
+ name: "empty label name",
+ labels: []string{"=#FF0000"},
+ errorContains: "label name cannot be empty",
+ },
+ {
+ name: "empty colour",
+ labels: []string{"label1="},
+ errorContains: "colour cannot be empty",
+ },
+ {
+ name: "colour too short",
+ labels: []string{"label1=#FF00"},
+ errorContains: "must be 6 hex digits",
+ },
+ {
+ name: "colour too long",
+ labels: []string{"label1=#FF00000"},
+ errorContains: "must be 6 hex digits",
+ },
+ {
+ name: "invalid hex characters",
+ labels: []string{"label1=#GGGGGG"},
+ errorContains: "must be valid hex digits",
+ },
+ {
+ name: "colour without hash too short",
+ labels: []string{"label1=FF00"},
+ errorContains: "must be 6 hex digits",
+ },
+ {
+ name: "colour without hash invalid characters",
+ labels: []string{"label1=ZZZZZZ"},
+ errorContains: "must be valid hex digits",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Set up viper with test labels
+ viper.Reset()
+ viper.Set("labels", tt.labels)
+
+ got, err := parseLabelsArgument()
+
+ if tt.errorContains != "" {
+ if err == nil {
+ t.Errorf("parseLabelsArgument() expected error containing %q, got nil", tt.errorContains)
+ return
+ }
+ if !strings.Contains(err.Error(), tt.errorContains) {
+ t.Errorf("parseLabelsArgument() error = %v, want error containing %q", err, tt.errorContains)
+ }
+ return
+ }
+
+ if err != nil {
+ t.Errorf("parseLabelsArgument() unexpected error: %v", err)
+ return
+ }
+
+ if len(got) != len(tt.want) {
+ t.Errorf("parseLabelsArgument() returned %d labels, want %d", len(got), len(tt.want))
+ return
+ }
+
+ for i, wantLabel := range tt.want {
+ if got[i].GetName() != wantLabel.GetName() {
+ t.Errorf("parseLabelsArgument() label[%d].Name = %q, want %q", i, got[i].GetName(), wantLabel.GetName())
+ }
+ if got[i].GetColour() != wantLabel.GetColour() {
+ t.Errorf("parseLabelsArgument() label[%d].Colour = %q, want %q", i, got[i].GetColour(), wantLabel.GetColour())
+ }
+ if got[i].GetType() != wantLabel.GetType() {
+ t.Errorf("parseLabelsArgument() label[%d].Type = %v, want %v", i, got[i].GetType(), wantLabel.GetType())
+ }
+ }
+ })
+ }
+}
diff --git a/cmd/terraform_plan.go b/cmd/terraform_plan.go
index a7ea28a5..c3b19611 100644
--- a/cmd/terraform_plan.go
+++ b/cmd/terraform_plan.go
@@ -244,6 +244,12 @@ func TerraformPlanImpl(ctx context.Context, cmd *cobra.Command, oi sdp.OvermindI
return nil
}
+ labels, err := parseLabelsArgument()
+ if err != nil {
+ uploadChangesSpinner.Fail(fmt.Sprintf("Uploading planned changes: failed to parse labels: %v", err))
+ return nil
+ }
+
properties := &sdp.ChangeProperties{
Title: title,
Description: viper.GetString("description"),
@@ -253,6 +259,7 @@ func TerraformPlanImpl(ctx context.Context, cmd *cobra.Command, oi sdp.OvermindI
CodeChanges: codeChangesOutput,
Repo: repoUrl,
EnrichedTags: enrichedTags,
+ Labels: labels,
}
if changeUuid == uuid.Nil {
diff --git a/go.mod b/go.mod
index 89995a40..56cdd128 100644
--- a/go.mod
+++ b/go.mod
@@ -273,10 +273,15 @@ require (
sigs.k8s.io/yaml v1.6.0 // indirect
)
-require github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0
+require (
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0
+)
require (
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
)
diff --git a/go.sum b/go.sum
index 01d76818..b3513d52 100644
--- a/go.sum
+++ b/go.sum
@@ -78,16 +78,24 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16AP
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0 h1:/Di3vB4sNeQ+7A8efjUVENvyB945Wruvstucqp7ZArg=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0/go.mod h1:gM3K25LQlsET3QR+4V74zxCsFAy0r6xMNN9n80SZn+4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0 h1:lMW1lD/17LUA5z1XTURo7LcVG2ICBPlyMHjIUrcFZNQ=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0/go.mod h1:ceIuwmxDWptoW3eCqSXlnPsZFKh4X+R38dWPv7GS9Vs=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
@@ -308,7 +316,6 @@ github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
@@ -374,6 +381,8 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
+github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
diff --git a/sdp-go/config.pb.go b/sdp-go/config.pb.go
index a2059a5f..ba4c5842 100644
--- a/sdp-go/config.pb.go
+++ b/sdp-go/config.pb.go
@@ -193,7 +193,9 @@ type BlastRadiusConfig struct {
// request. Once a request has hit this limit, all currently running
// requests will be cancelled and the blast radius returned as-is
MaxItems int32 `protobuf:"varint,1,opt,name=maxItems,proto3" json:"maxItems,omitempty"`
- // How deeply to link when calculating the blast radius for a change
+ // How deeply to link when calculating the blast radius for a change. This
+ // is the maximum number of levels of links to traverse from the root item.
+ // Different implementations may differ in how they handle this.
LinkDepth int32 `protobuf:"varint,2,opt,name=linkDepth,proto3" json:"linkDepth,omitempty"`
// Maximum time duration for blast radius calculation. When this time limit
// is reached, the analysis gracefully continues with risks identified up to
diff --git a/sources/azure/docs/federated-credentials.md b/sources/azure/docs/federated-credentials.md
new file mode 100644
index 00000000..1187a537
--- /dev/null
+++ b/sources/azure/docs/federated-credentials.md
@@ -0,0 +1,273 @@
+# Azure Federated Credentials Implementation
+
+## Overview
+
+The Azure source now supports federated credential authentication using the Azure SDK's `DefaultAzureCredential`. This provides a flexible authentication mechanism that automatically handles multiple authentication methods, making it suitable for various deployment scenarios including Kubernetes workload identity, managed identity, and local development.
+
+## How It Works
+
+### DefaultAzureCredential Chain
+
+The `DefaultAzureCredential` attempts authentication using multiple methods in the following order:
+
+1. **Environment Variables** - Service principal or workload identity via environment variables
+2. **Workload Identity** - Kubernetes/EKS with OIDC federation (via `AZURE_FEDERATED_TOKEN_FILE`)
+3. **Managed Identity** - When running on Azure infrastructure (VMs, App Service, Functions, etc.)
+4. **Azure CLI** - Uses credentials from `az login` (ideal for local development)
+
+The first successful authentication method is used, and subsequent methods are not attempted.
+
+### Implementation Details
+
+#### Credential Initialization
+
+The credential initialization is handled in `sources/azure/shared/credentials.go`:
+
+```go
+func NewAzureCredential(ctx context.Context) (*azidentity.DefaultAzureCredential, error) {
+ cred, err := azidentity.NewDefaultAzureCredential(nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create Azure credential: %w", err)
+ }
+ return cred, nil
+}
+```
+
+#### Client Initialization
+
+Azure SDK clients are initialized with the credential in `sources/azure/proc/proc.go`:
+
+```go
+// Initialize Azure credentials
+cred, err := azureshared.NewAzureCredential(ctx)
+if err != nil {
+ return fmt.Errorf("error creating Azure credentials: %w", err)
+}
+
+// Pass credentials to adapters
+discoveryAdapters, err := adapters(ctx, cfg.SubscriptionID, cfg.TenantID,
+ cfg.ClientID, cfg.Regions, cred, linker, true)
+```
+
+#### Resource Group Discovery
+
+The implementation automatically discovers all resource groups in the subscription and creates adapters for each:
+
+```go
+// Discover resource groups in the subscription
+rgClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil)
+pager := rgClient.NewListPager(nil)
+for pager.More() {
+ page, err := pager.NextPage(ctx)
+ for _, rg := range page.Value {
+ resourceGroups = append(resourceGroups, *rg.Name)
+ }
+}
+```
+
+#### Permission Verification
+
+The source verifies subscription access at startup:
+
+```go
+func checkSubscriptionAccess(ctx context.Context, subscriptionID string,
+ cred *azidentity.DefaultAzureCredential) error {
+
+ client, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create resource groups client: %w", err)
+ }
+
+ // Try to list resource groups to verify access
+ pager := client.NewListPager(nil)
+ _, err = pager.NextPage(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to verify subscription access: %w", err)
+ }
+
+ return nil
+}
+```
+
+## Environment Variables
+
+### Required Variables
+
+These variables must be set for the Azure source to function:
+
+- `AZURE_SUBSCRIPTION_ID` - The Azure subscription ID to discover resources in
+- `AZURE_TENANT_ID` - The Azure AD tenant ID
+- `AZURE_CLIENT_ID` - The application/client ID
+
+### Authentication Method Variables
+
+Depending on your authentication method, you may need additional variables:
+
+#### Service Principal with Client Secret
+
+```bash
+export AZURE_CLIENT_SECRET="your-client-secret"
+```
+
+#### Service Principal with Certificate
+
+```bash
+export AZURE_CLIENT_CERTIFICATE_PATH="/path/to/certificate.pem"
+```
+
+#### Federated Workload Identity (Kubernetes/EKS)
+
+```bash
+export AZURE_FEDERATED_TOKEN_FILE="/var/run/secrets/azure/tokens/azure-identity-token"
+```
+
+This is typically set automatically by the Azure Workload Identity webhook when running in Kubernetes with proper annotations.
+
+## Authentication Methods
+
+### 1. Workload Identity (Kubernetes with OIDC Federation)
+
+**Use Case:** Running in Kubernetes clusters (AKS, EKS, GKE) with Azure Workload Identity configured.
+
+**How It Works:**
+- The Kubernetes pod is annotated with an Azure AD application
+- Azure AD trusts the OIDC token from the Kubernetes cluster
+- A federated token file is mounted into the pod
+- `DefaultAzureCredential` reads this token and exchanges it for Azure credentials
+
+**Configuration:**
+```yaml
+# Pod annotation
+azure.workload.identity/client-id: "00000000-0000-0000-0000-000000000000"
+azure.workload.identity/tenant-id: "00000000-0000-0000-0000-000000000000"
+
+# Environment variables (set automatically by webhook)
+AZURE_CLIENT_ID: "00000000-0000-0000-0000-000000000000"
+AZURE_TENANT_ID: "00000000-0000-0000-0000-000000000000"
+AZURE_FEDERATED_TOKEN_FILE: "/var/run/secrets/azure/tokens/azure-identity-token"
+```
+
+**Reference:** [Azure Workload Identity Documentation](https://azure.github.io/azure-workload-identity/docs/)
+
+### 2. Service Principal (Environment Variables)
+
+**Use Case:** CI/CD pipelines, containerized deployments, or any scenario where you have a service principal.
+
+**Configuration:**
+```bash
+export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_CLIENT_SECRET="your-client-secret"
+```
+
+### 3. Managed Identity
+
+**Use Case:** Running on Azure infrastructure (VMs, App Service, Container Instances, etc.)
+
+**How It Works:**
+- Azure automatically provides credentials to the service
+- No credentials need to be stored or configured
+- `DefaultAzureCredential` automatically detects and uses managed identity
+
+**Configuration:**
+- System-assigned identity: No configuration needed
+- User-assigned identity: Set `AZURE_CLIENT_ID` to the identity's client ID
+
+### 4. Azure CLI (Local Development)
+
+**Use Case:** Local development and testing
+
+**Setup:**
+```bash
+# Login with Azure CLI
+az login
+
+# Set the subscription
+az account set --subscription "your-subscription-id"
+```
+
+**Configuration:**
+```bash
+# Only subscription ID is needed from environment
+export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000"
+```
+
+The Azure source will use the credentials from `az login` automatically.
+
+## Required Azure Permissions
+
+The Azure source requires the following permissions on the subscription:
+
+### Built-in Role
+The minimum required role is **Reader** at the subscription level.
+
+### Specific Permissions
+- `Microsoft.Resources/subscriptions/resourceGroups/read` - List resource groups
+- `Microsoft.Compute/virtualMachines/read` - Read virtual machines
+- Additional read permissions for other resource types as adapters are added
+
+## Troubleshooting
+
+### Common Issues
+
+#### 1. "DefaultAzureCredential failed to retrieve a token"
+
+**Cause:** No valid authentication method is available.
+
+**Solution:**
+- Verify environment variables are set correctly
+- For local development, run `az login`
+- For workload identity, verify pod annotations and service account configuration
+
+#### 2. "Failed to verify subscription access"
+
+**Cause:** Credentials don't have access to the subscription, or subscription ID is incorrect.
+
+**Solution:**
+- Verify the subscription ID is correct
+- Ensure the identity has at least Reader role on the subscription
+- Check Azure AD tenant ID matches the subscription's tenant
+
+#### 3. "Failed to list resource groups"
+
+**Cause:** Missing permissions or network connectivity issues.
+
+**Solution:**
+- Verify the identity has `Microsoft.Resources/subscriptions/resourceGroups/read` permission
+- Check network connectivity to Azure (firewall, proxy)
+- Verify subscription ID is correct
+
+### Debugging
+
+Enable debug logging to see authentication details:
+
+```bash
+export LOG_LEVEL=debug
+```
+
+The logs will show:
+- Which authentication method is being used
+- Subscription access verification results
+- Resource group discovery progress
+- Adapter initialization details
+
+## Security Best Practices
+
+1. **Use Workload Identity in Kubernetes**: Preferred method as it avoids storing credentials
+2. **Use Managed Identity on Azure**: No credential management needed
+3. **Avoid Client Secrets in Code**: Always use environment variables
+4. **Rotate Credentials Regularly**: If using service principals with secrets
+5. **Principle of Least Privilege**: Grant only Reader role unless more is needed
+6. **Separate Identities per Environment**: Don't reuse production credentials in development
+
+## References
+
+- [Azure Identity SDK for Go](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity)
+- [DefaultAzureCredential Documentation](https://learn.microsoft.com/en-us/azure/developer/go/sdk/authentication/credential-chains)
+- [Azure Workload Identity](https://azure.github.io/azure-workload-identity/docs/)
+- [Azure Managed Identity](https://learn.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview)
+- [Azure RBAC Roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles)
+
diff --git a/sources/azure/docs/testing-federated-auth.md b/sources/azure/docs/testing-federated-auth.md
new file mode 100644
index 00000000..f19470c4
--- /dev/null
+++ b/sources/azure/docs/testing-federated-auth.md
@@ -0,0 +1,840 @@
+# Testing Azure Federated Authentication
+
+## Overview
+
+This document provides comprehensive testing scenarios for Azure federated authentication, including cross-cloud identity federation from AWS and GCP. These scenarios help verify that the Azure source correctly handles federated credentials in various deployment contexts.
+
+## Table of Contents
+
+1. [Local Testing with Azure CLI](#local-testing-with-azure-cli)
+2. [Service Principal Testing](#service-principal-testing)
+3. [AWS Identity to Azure Federation](#aws-identity-to-azure-federation)
+4. [GCP Service Account to Azure Federation](#gcp-service-account-to-azure-federation)
+5. [Kubernetes Workload Identity Testing](#kubernetes-workload-identity-testing)
+6. [Verification and Validation](#verification-and-validation)
+
+## Prerequisites
+
+### Azure Setup
+
+1. **Azure Subscription** with resources to discover
+2. **Azure AD Application** registered
+3. **Reader role** assigned to the application on the subscription
+4. **Resource Groups and VMs** created for testing (optional but recommended)
+
+### Tools Required
+
+- Azure CLI (`az`)
+- AWS CLI (`aws`) - for AWS federation testing
+- GCP CLI (`gcloud`) - for GCP federation testing
+- `kubectl` - for Kubernetes testing
+- `curl` or similar HTTP client
+- `jq` - for JSON parsing
+
+---
+
+## Local Testing with Azure CLI
+
+### Objective
+Verify that the Azure source works with Azure CLI credentials on a developer workstation.
+
+### Setup
+
+1. **Install Azure CLI:**
+```bash
+# macOS
+brew install azure-cli
+
+# Linux
+curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+
+# Windows
+# Download from https://aka.ms/installazurecliwindows
+```
+
+2. **Login to Azure:**
+```bash
+az login
+```
+
+3. **Select subscription:**
+```bash
+# List available subscriptions
+az account list --output table
+
+# Set active subscription
+az account set --subscription "your-subscription-id"
+
+# Verify
+az account show
+```
+
+### Configuration
+
+```bash
+# Set environment variables
+export AZURE_SUBSCRIPTION_ID=$(az account show --query id -o tsv)
+export AZURE_TENANT_ID=$(az account show --query tenantId -o tsv)
+export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000" # Your app's client ID
+export LOG_LEVEL=debug
+```
+
+### Run the Source
+
+```bash
+cd /workspace/sources/azure
+go run main.go
+```
+
+### Expected Output
+
+```
+INFO Using config from viper
+INFO Successfully initialized Azure credentials
+INFO Discovered resource groups count=5
+INFO Initialized Azure adapters adapter_count=5
+INFO Successfully verified subscription access
+INFO Starting healthcheck server port=8080
+INFO Sources initialized
+```
+
+### Verification
+
+```bash
+# Check health endpoint
+curl http://localhost:8080/healthz
+# Expected: "ok"
+
+# Check logs for authentication method
+# Should see: "Successfully initialized Azure credentials"
+```
+
+### Success Criteria
+
+- ✅ Source starts without errors
+- ✅ Health check returns "ok"
+- ✅ Resource groups discovered
+- ✅ Adapters initialized for each resource group
+- ✅ No authentication errors in logs
+
+---
+
+## Service Principal Testing
+
+### Objective
+Verify authentication using a service principal with client secret.
+
+### Setup
+
+1. **Create Service Principal:**
+```bash
+# Create with Reader role on subscription
+az ad sp create-for-rbac \
+ --name "test-overmind-azure-source" \
+ --role Reader \
+ --scopes "/subscriptions/$(az account show --query id -o tsv)" \
+ --output json > sp-credentials.json
+
+# View credentials
+cat sp-credentials.json
+```
+
+2. **Extract Credentials:**
+```bash
+export AZURE_SUBSCRIPTION_ID=$(az account show --query id -o tsv)
+export AZURE_TENANT_ID=$(jq -r '.tenant' sp-credentials.json)
+export AZURE_CLIENT_ID=$(jq -r '.appId' sp-credentials.json)
+export AZURE_CLIENT_SECRET=$(jq -r '.password' sp-credentials.json)
+export LOG_LEVEL=debug
+```
+
+### Test Service Principal
+
+```bash
+# Verify the service principal can authenticate
+az login --service-principal \
+ --username $AZURE_CLIENT_ID \
+ --password $AZURE_CLIENT_SECRET \
+ --tenant $AZURE_TENANT_ID
+
+# List resource groups to verify permissions
+az group list --output table
+
+# Logout (so the source uses environment variables, not CLI cache)
+az logout
+```
+
+### Run the Source
+
+```bash
+cd /workspace/sources/azure
+go run main.go
+```
+
+### Expected Output
+
+```
+DEBUG Initializing Azure credentials using DefaultAzureCredential
+INFO Successfully initialized Azure credentials auth.method=default-azure-credential
+INFO Discovered resource groups count=5
+INFO Successfully verified subscription access
+```
+
+### Verification
+
+```bash
+# Monitor logs for authentication
+# Should use environment variables, not Azure CLI
+
+# Verify it still works after Azure CLI logout
+curl http://localhost:8080/healthz
+```
+
+### Cleanup
+
+```bash
+# Delete test service principal
+az ad sp delete --id $AZURE_CLIENT_ID
+
+# Remove credentials file
+rm sp-credentials.json
+```
+
+### Success Criteria
+
+- ✅ Authentication works without Azure CLI session
+- ✅ Service principal credentials used from environment
+- ✅ All resources discovered successfully
+- ✅ Health check passes
+
+---
+
+## AWS Identity to Azure Federation
+
+### Objective
+Configure AWS IAM identity to authenticate to Azure using OIDC federation, simulating a scenario where the Azure source runs in EKS with AWS IRSA.
+
+### Architecture
+
+```
+AWS EKS Pod → AWS IAM Role → OIDC Token → Azure AD Federated Credential → Azure Access
+```
+
+### Prerequisites
+
+- AWS account with EKS cluster
+- Azure subscription and Azure AD tenant
+- OIDC issuer configured on EKS cluster
+
+### Step 1: Configure Azure AD Application
+
+```bash
+# Create Azure AD application
+az ad app create --display-name "test-aws-to-azure-federation" \
+ --output json > azure-app.json
+
+APP_OBJECT_ID=$(jq -r '.id' azure-app.json)
+APP_CLIENT_ID=$(jq -r '.appId' azure-app.json)
+
+echo "Azure AD Application Client ID: $APP_CLIENT_ID"
+```
+
+### Step 2: Get AWS EKS OIDC Issuer
+
+```bash
+# Get OIDC issuer URL from your EKS cluster
+export OIDC_ISSUER=$(aws eks describe-cluster \
+ --name your-eks-cluster-name \
+ --query "cluster.identity.oidc.issuer" \
+ --output text)
+
+# Remove https:// prefix
+export OIDC_ISSUER_URL=${OIDC_ISSUER#https://}
+
+echo "OIDC Issuer: $OIDC_ISSUER"
+```
+
+### Step 3: Create Federated Identity Credential in Azure
+
+```bash
+# Create federated credential that trusts AWS EKS OIDC
+az ad app federated-credential create \
+ --id $APP_OBJECT_ID \
+ --parameters '{
+ "name": "aws-eks-federation",
+ "issuer": "'"$OIDC_ISSUER"'",
+ "subject": "system:serviceaccount:default:azure-source-sa",
+ "audiences": ["sts.amazonaws.com"],
+ "description": "Federated credential for AWS EKS to Azure"
+ }'
+
+# Verify creation
+az ad app federated-credential list --id $APP_OBJECT_ID
+```
+
+### Step 4: Assign Azure Permissions
+
+```bash
+# Create service principal from app
+az ad sp create --id $APP_CLIENT_ID
+
+# Assign Reader role
+az role assignment create \
+ --role Reader \
+ --assignee $APP_CLIENT_ID \
+ --scope /subscriptions/$(az account show --query id -o tsv)
+```
+
+### Step 5: Configure AWS IAM Role
+
+```bash
+# NOTE(review): the original AWS IAM trust-policy heredoc appears to have been
+# truncated here during extraction — restore it from the source document.
+# The following lines belong to the GCP federation setup and expect an
+# Azure AD application JSON file:
+az ad app create --display-name "test-gcp-to-azure-federation" \
+  --output json > azure-app-gcp.json
+
+APP_OBJECT_ID=$(jq -r '.id' azure-app-gcp.json)
+APP_CLIENT_ID=$(jq -r '.appId' azure-app-gcp.json)
+
+# Create federated credential
+az ad app federated-credential create \
+ --id $APP_OBJECT_ID \
+ --parameters '{
+ "name": "gcp-gke-federation",
+ "issuer": "'"$OIDC_ISSUER"'",
+ "subject": "system:serviceaccount:default:azure-source-ksa",
+ "audiences": ["azure"],
+ "description": "Federated credential for GCP GKE to Azure"
+ }'
+
+# Create service principal and assign Reader role
+az ad sp create --id $APP_CLIENT_ID
+az role assignment create \
+ --role Reader \
+ --assignee $APP_CLIENT_ID \
+ --scope /subscriptions/$(az account show --query id -o tsv)
+```
+
+### Step 4: Configure GKE Resources
+
+```yaml
+# azure-source-gke.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: azure-source-ksa
+ namespace: default
+ annotations:
+ iam.gke.io/gcp-service-account: azure-source-gsa@YOUR_PROJECT.iam.gserviceaccount.com
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: azure-source
+ namespace: default
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: azure-source
+ template:
+ metadata:
+ labels:
+ app: azure-source
+ spec:
+ serviceAccountName: azure-source-ksa
+ containers:
+ - name: azure-source
+ image: your-registry/azure-source:latest
+ env:
+ - name: AZURE_SUBSCRIPTION_ID
+ value: "your-azure-subscription-id"
+ - name: AZURE_TENANT_ID
+ value: "your-azure-tenant-id"
+ - name: AZURE_CLIENT_ID
+ value: "your-azure-app-client-id"
+ - name: LOG_LEVEL
+ value: "debug"
+ # GKE will inject GOOGLE_APPLICATION_CREDENTIALS automatically
+```
+
+### Step 5: Bind Service Accounts
+
+```bash
+# Allow Kubernetes service account to impersonate GCP service account
+gcloud iam service-accounts add-iam-policy-binding $GSA_EMAIL \
+ --role roles/iam.workloadIdentityUser \
+ --member "serviceAccount:$PROJECT_ID.svc.id.goog[default/azure-source-ksa]"
+
+# Deploy to GKE
+kubectl apply -f azure-source-gke.yaml
+
+# Wait for pod
+kubectl wait --for=condition=ready pod -l app=azure-source --timeout=60s
+```
+
+### Step 6: Verify
+
+```bash
+# Check logs
+kubectl logs -l app=azure-source --tail=50
+
+# Check health
+kubectl port-forward deployment/azure-source 8080:8080 &
+curl http://localhost:8080/healthz
+
+# Verify GCP token is available
+kubectl exec -it deployment/azure-source -- env | grep GOOGLE
+```
+
+### Troubleshooting
+
+```bash
+# Check workload identity binding
+gcloud iam service-accounts get-iam-policy $GSA_EMAIL
+
+# Verify token can be obtained
+kubectl exec -it deployment/azure-source -- \
+ gcloud auth print-identity-token
+
+# Check Azure federated credential
+az ad app federated-credential list --id $APP_OBJECT_ID
+```
+
+### Cleanup
+
+```bash
+# Delete GKE resources
+kubectl delete -f azure-source-gke.yaml
+
+# Delete GCP service account
+gcloud iam service-accounts delete $GSA_EMAIL --quiet
+
+# Delete Azure resources
+az ad app federated-credential delete \
+ --id $APP_OBJECT_ID \
+ --federated-credential-id gcp-gke-federation
+az ad app delete --id $APP_OBJECT_ID
+```
+
+### Success Criteria
+
+- ✅ GCP OIDC token exchanged for Azure credentials
+- ✅ Source authenticates to Azure from GKE
+- ✅ Resources discovered successfully
+- ✅ Health check passes
+
+---
+
+## Kubernetes Workload Identity Testing
+
+### Objective
+Test native Azure Workload Identity in AKS (Azure Kubernetes Service).
+
+### Prerequisites
+
+- AKS cluster with OIDC issuer and Workload Identity enabled
+- Azure AD application registered
+- Azure Workload Identity webhook installed
+
+### Setup
+
+```bash
+# Enable OIDC and Workload Identity on AKS
+az aks update \
+ --resource-group myResourceGroup \
+ --name myAKSCluster \
+ --enable-oidc-issuer \
+ --enable-workload-identity
+
+# Install Azure Workload Identity webhook (if not installed)
+helm repo add azure-workload-identity https://azure.github.io/azure-workload-identity/charts
+helm install workload-identity-webhook azure-workload-identity/workload-identity-webhook \
+ --namespace azure-workload-identity-system \
+ --create-namespace
+
+# Get OIDC issuer URL
+export OIDC_ISSUER_URL=$(az aks show \
+ --resource-group myResourceGroup \
+ --name myAKSCluster \
+ --query "oidcIssuerProfile.issuerUrl" -o tsv)
+```
+
+### Configure Azure AD
+
+```bash
+# Create application
+az ad app create --display-name "azure-source-aks-workload-id" \
+ --output json > app.json
+
+APP_OBJECT_ID=$(jq -r '.id' app.json)
+APP_CLIENT_ID=$(jq -r '.appId' app.json)
+
+# Create federated credential
+az ad app federated-credential create \
+ --id $APP_OBJECT_ID \
+ --parameters "{
+ \"name\": \"aks-workload-identity\",
+ \"issuer\": \"$OIDC_ISSUER_URL\",
+ \"subject\": \"system:serviceaccount:default:azure-source-sa\",
+ \"audiences\": [\"api://AzureADTokenExchange\"]
+ }"
+
+# Assign permissions
+az ad sp create --id $APP_CLIENT_ID
+az role assignment create \
+ --role Reader \
+ --assignee $APP_CLIENT_ID \
+ --scope /subscriptions/$(az account show --query id -o tsv)
+```
+
+### Deploy
+
+```yaml
+# azure-source-aks.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: azure-source-sa
+ annotations:
+ azure.workload.identity/client-id: "YOUR_APP_CLIENT_ID"
+ azure.workload.identity/tenant-id: "YOUR_TENANT_ID"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: azure-source
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: azure-source
+ template:
+ metadata:
+ labels:
+ app: azure-source
+ azure.workload.identity/use: "true"
+ spec:
+ serviceAccountName: azure-source-sa
+ containers:
+ - name: azure-source
+ image: your-registry/azure-source:latest
+ env:
+ - name: AZURE_SUBSCRIPTION_ID
+ value: "your-subscription-id"
+ - name: AZURE_TENANT_ID
+ value: "your-tenant-id"
+ - name: AZURE_CLIENT_ID
+ value: "your-client-id"
+```
+
+```bash
+kubectl apply -f azure-source-aks.yaml
+kubectl wait --for=condition=ready pod -l app=azure-source --timeout=60s
+kubectl logs -l app=azure-source
+```
+
+### Success Criteria
+
+- ✅ Workload Identity webhook injects token volume
+- ✅ Source authenticates using projected token
+- ✅ Resources discovered
+- ✅ Health check passes
+
+---
+
+## Verification and Validation
+
+### Standard Checks
+
+After completing any test scenario, perform these verification steps:
+
+#### 1. Health Check
+
+```bash
+# Forward port
+kubectl port-forward deployment/azure-source 8080:8080 &
+
+# Check health
+curl http://localhost:8080/healthz
+
+# Expected: "ok"
+```
+
+#### 2. Log Analysis
+
+```bash
+# Check for successful authentication
+kubectl logs -l app=azure-source | grep "Successfully initialized Azure credentials"
+
+# Check for resource discovery
+kubectl logs -l app=azure-source | grep "Discovered resource groups"
+
+# Check for subscription verification
+kubectl logs -l app=azure-source | grep "Successfully verified subscription access"
+
+# Look for errors
+kubectl logs -l app=azure-source | grep -i error
+```
+
+#### 3. Metrics and Observability
+
+If Honeycomb/Sentry integration is enabled:
+
+```bash
+# Check traces in Honeycomb for:
+# - Authentication attempts
+# - Resource discovery operations
+# - Health check calls
+
+# Check Sentry for any error reports
+```
+
+### Validation Checklist
+
+- [ ] Source starts successfully
+- [ ] No authentication errors
+- [ ] Subscription access verified
+- [ ] Resource groups discovered
+- [ ] Adapters initialized
+- [ ] Health check returns 200 OK
+- [ ] Logs show expected authentication method
+- [ ] No error traces in observability tools
+- [ ] Source survives pod restarts
+- [ ] Token refresh works (for long-running tests)
+
+### Performance Testing
+
+```bash
+# Measure startup time
+kubectl logs -l app=azure-source --timestamps | \
+ awk '/Started/ {print $1}'
+
+# Check memory usage
+kubectl top pod -l app=azure-source
+
+# Monitor over time
+watch -n 5 'kubectl top pod -l app=azure-source'
+```
+
+### Common Issues and Solutions
+
+| Issue | Possible Cause | Solution |
+|-------|---------------|----------|
+| "DefaultAzureCredential failed" | No auth method available | Check environment variables, verify OIDC token injection |
+| "Failed to verify subscription access" | Insufficient permissions | Verify Reader role assignment |
+| "Failed to list resource groups" | Network/permissions issue | Check network policies, verify subscription ID |
+| Pod crashloops | Invalid configuration | Check logs with `kubectl logs`, verify all required env vars |
+| Health check fails | Credentials expired/invalid | Check credential validity, verify RBAC |
+
+## Summary
+
+This testing guide covers:
+- ✅ Local development with Azure CLI
+- ✅ Service principal authentication
+- ✅ AWS to Azure federation (EKS→Azure)
+- ✅ GCP to Azure federation (GKE→Azure)
+- ✅ Native Azure Workload Identity (AKS)
+- ✅ Comprehensive verification steps
+
+These scenarios ensure the Azure source correctly handles federated credentials across all deployment contexts.
+
diff --git a/sources/azure/docs/usage.md b/sources/azure/docs/usage.md
new file mode 100644
index 00000000..2afcafdb
--- /dev/null
+++ b/sources/azure/docs/usage.md
@@ -0,0 +1,407 @@
+# Azure Source Usage Guide
+
+## Quick Start
+
+This guide provides quick configuration examples for running the Azure source in various environments.
+
+## Prerequisites
+
+1. **Azure Subscription**: An active Azure subscription
+2. **Azure AD Application**: A registered application in Azure AD with appropriate permissions
+3. **Permissions**: At minimum, Reader role on the subscription
+
+## Configuration Methods
+
+The Azure source can be configured using:
+1. **Command-line flags**
+2. **Environment variables**
+3. **Configuration file** (YAML)
+
+### Environment Variables
+
+Environment variables use underscores instead of hyphens and are automatically uppercased:
+- Flag: `--azure-subscription-id` → Environment: `AZURE_SUBSCRIPTION_ID`
+- Flag: `--azure-tenant-id` → Environment: `AZURE_TENANT_ID`
+- Flag: `--azure-client-id` → Environment: `AZURE_CLIENT_ID`
+
+## Common Scenarios
+
+### Scenario 1: Local Development with Azure CLI
+
+**Use Case:** Testing the source on your local machine
+
+**Prerequisites:**
+```bash
+# Install Azure CLI
+# https://learn.microsoft.com/en-us/cli/azure/install-azure-cli
+
+# Login to Azure
+az login
+
+# Set active subscription (optional, if you have multiple)
+az account set --subscription "your-subscription-name-or-id"
+
+# Verify current subscription
+az account show
+```
+
+**Configuration:**
+```bash
+# Set required environment variables
+export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000"
+
+# Run the source
+./azure-source
+```
+
+**Command-line Alternative:**
+```bash
+./azure-source \
+ --azure-subscription-id="00000000-0000-0000-0000-000000000000" \
+ --azure-tenant-id="00000000-0000-0000-0000-000000000000" \
+ --azure-client-id="00000000-0000-0000-0000-000000000000"
+```
+
+### Scenario 2: Service Principal with Client Secret
+
+**Use Case:** CI/CD pipelines, Docker containers, non-Azure environments
+
+**Setup Service Principal:**
+```bash
+# Create a service principal
+az ad sp create-for-rbac --name "overmind-azure-source" \
+ --role Reader \
+ --scopes "/subscriptions/00000000-0000-0000-0000-000000000000"
+
+# Output will include:
+# {
+# "appId": "00000000-0000-0000-0000-000000000000",
+# "displayName": "overmind-azure-source",
+# "password": "your-client-secret",
+# "tenant": "00000000-0000-0000-0000-000000000000"
+# }
+```
+
+**Configuration:**
+```bash
+export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_TENANT_ID="00000000-0000-0000-0000-000000000000" # From 'tenant' in output
+export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000" # From 'appId' in output
+export AZURE_CLIENT_SECRET="your-client-secret" # From 'password' in output
+
+# Run the source
+./azure-source
+```
+
+**Docker Example:**
+```dockerfile
+FROM ubuntu:22.04
+
+COPY azure-source /usr/local/bin/
+
+ENV AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
+ENV AZURE_TENANT_ID="00000000-0000-0000-0000-000000000000"
+ENV AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000"
+# Client secret should be passed at runtime, not baked into image
+# docker run -e AZURE_CLIENT_SECRET="..." your-image
+
+ENTRYPOINT ["/usr/local/bin/azure-source"]
+```
+
+### Scenario 3: Kubernetes with Workload Identity
+
+**Use Case:** Running in Kubernetes (AKS, EKS, GKE) with Azure Workload Identity
+
+**Prerequisites:**
+1. Azure Workload Identity installed in cluster
+2. OIDC issuer configured
+3. Federated identity credential configured in Azure AD
+
+**Setup Azure Workload Identity:**
+
+1. **Enable OIDC on your cluster** (example for AKS):
+```bash
+az aks update \
+ --resource-group myResourceGroup \
+ --name myAKSCluster \
+ --enable-oidc-issuer \
+ --enable-workload-identity
+```
+
+2. **Get OIDC Issuer URL:**
+```bash
+az aks show --resource-group myResourceGroup --name myAKSCluster \
+ --query "oidcIssuerProfile.issuerUrl" -o tsv
+```
+
+3. **Create Azure AD Application:**
+```bash
+az ad app create --display-name overmind-azure-source
+```
+
+4. **Create Federated Credential:**
+```bash
+az ad app federated-credential create \
+  --id <app-object-id> \
+  --parameters '{
+    "name": "overmind-k8s-federation",
+    "issuer": "<oidc-issuer-url>",
+    "subject": "system:serviceaccount:default:overmind-azure-source",
+    "audiences": ["api://AzureADTokenExchange"]
+  }'
+```
+
+5. **Assign Reader role:**
+```bash
+az role assignment create \
+  --role Reader \
+  --assignee <app-client-id> \
+  --scope /subscriptions/<subscription-id>
+```
+
+**Kubernetes Deployment:**
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: overmind-azure-source
+ namespace: default
+ annotations:
+ azure.workload.identity/client-id: "00000000-0000-0000-0000-000000000000"
+ azure.workload.identity/tenant-id: "00000000-0000-0000-0000-000000000000"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: azure-source
+ namespace: default
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: azure-source
+ template:
+ metadata:
+ labels:
+ app: azure-source
+ azure.workload.identity/use: "true" # Important!
+ spec:
+ serviceAccountName: overmind-azure-source
+ containers:
+ - name: azure-source
+ image: your-registry/azure-source:latest
+ env:
+ - name: AZURE_SUBSCRIPTION_ID
+ value: "00000000-0000-0000-0000-000000000000"
+ - name: AZURE_TENANT_ID
+ value: "00000000-0000-0000-0000-000000000000"
+ - name: AZURE_CLIENT_ID
+ value: "00000000-0000-0000-0000-000000000000"
+ # AZURE_FEDERATED_TOKEN_FILE is set automatically by the webhook
+```
+
+### Scenario 4: Azure VM with Managed Identity
+
+**Use Case:** Running on an Azure Virtual Machine
+
+**Setup:**
+
+1. **Enable System-Assigned Managed Identity on VM:**
+```bash
+az vm identity assign \
+ --resource-group myResourceGroup \
+ --name myVM
+```
+
+2. **Assign Reader role to the managed identity:**
+```bash
+# Get the principal ID
+PRINCIPAL_ID=$(az vm show --resource-group myResourceGroup --name myVM \
+ --query identity.principalId -o tsv)
+
+# Assign role
+az role assignment create \
+ --role Reader \
+ --assignee $PRINCIPAL_ID \
+ --scope /subscriptions/<subscription-id>
+```
+
+**Configuration on VM:**
+```bash
+# Only subscription info is needed - managed identity is automatic
+export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000"
+
+./azure-source
+```
+
+### Scenario 5: Specify Azure Regions (Optional)
+
+**Use Case:** Limit discovery to specific regions for performance
+
+**Configuration:**
+```bash
+export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000"
+export AZURE_REGIONS="eastus,westus2,northeurope"
+
+./azure-source
+```
+
+**Command-line:**
+```bash
+./azure-source \
+ --azure-subscription-id="00000000-0000-0000-0000-000000000000" \
+ --azure-tenant-id="00000000-0000-0000-0000-000000000000" \
+ --azure-client-id="00000000-0000-0000-0000-000000000000" \
+ --azure-regions="eastus,westus2,northeurope"
+```
+
+**Note:** If regions are not specified, the source will discover resources in all regions.
+
+## Configuration File
+
+You can also use a YAML configuration file (default location: `/etc/srcman/config/source.yaml`):
+
+```yaml
+# Azure Configuration
+azure-subscription-id: "00000000-0000-0000-0000-000000000000"
+azure-tenant-id: "00000000-0000-0000-0000-000000000000"
+azure-client-id: "00000000-0000-0000-0000-000000000000"
+azure-regions: "eastus,westus2"
+
+# Source Configuration
+nats-url: "nats://nats:4222"
+max-parallel-executions: 1000
+
+# Logging
+log: "info" # panic, fatal, error, warn, info, debug, trace
+
+# Health Check
+health-check-port: 8080
+
+# Tracing (Optional)
+honeycomb-api-key: "your-honeycomb-key"
+sentry-dsn: "your-sentry-dsn"
+run-mode: "release" # release, debug, or test
+```
+
+**Run with config file:**
+```bash
+./azure-source --config /path/to/config.yaml
+```
+
+## Available Flags
+
+All configuration can be provided via command-line flags:
+
+```bash
+./azure-source --help
+
+Flags:
+ # Azure-specific flags
+ --azure-subscription-id string Azure Subscription ID that this source should operate in
+ --azure-tenant-id string Azure Tenant ID (Azure AD tenant) for authentication
+ --azure-client-id string Azure Client ID (Application ID) for federated credentials authentication
+ --azure-regions string Comma-separated list of Azure regions that this source should operate in
+
+ # General flags
+ --config string config file path (default "/etc/srcman/config/source.yaml")
+ --log string Set the log level (default "info")
+ --health-check-port int The port that the health check should run on (default 8080)
+
+ # NATS flags
+ --nats-url string NATS server URL
+ --nats-name-prefix string Prefix for NATS connection name
+ --max-parallel-executions int Max number of requests to execute in parallel
+
+ # Tracing flags
+ --honeycomb-api-key string Honeycomb API key for tracing
+ --sentry-dsn string Sentry DSN for error tracking
+ --run-mode string Run mode: release, debug, or test (default "release")
+```
+
+## Health Check
+
+The source exposes a health check endpoint:
+
+```bash
+# Check health
+curl http://localhost:8080/healthz
+
+# Response: "ok" (HTTP 200) if healthy
+# Response: Error message (HTTP 500) if unhealthy
+```
+
+The health check verifies:
+1. Source is running
+2. Credentials are valid
+3. Subscription is accessible
+
+## Troubleshooting
+
+### Check Logs
+
+```bash
+# Enable debug logging
+export LOG_LEVEL=debug
+./azure-source
+
+# Or with flag
+./azure-source --log=debug
+```
+
+### Verify Authentication
+
+```bash
+# Test Azure CLI authentication
+az account show
+
+# Test service principal authentication
+az login --service-principal \
+ --username $AZURE_CLIENT_ID \
+ --password $AZURE_CLIENT_SECRET \
+ --tenant $AZURE_TENANT_ID
+
+# List resource groups to verify permissions
+az group list --subscription $AZURE_SUBSCRIPTION_ID
+```
+
+### Common Issues
+
+**Issue:** "failed to create Azure credential"
+- **Solution:** Verify environment variables are set correctly. For local development, ensure `az login` is completed.
+
+**Issue:** "failed to verify subscription access"
+- **Solution:** Verify the identity has Reader role on the subscription. Check subscription ID is correct.
+
+**Issue:** "No resource groups found"
+- **Solution:** This may be normal if the subscription has no resource groups. The source will still run successfully.
+
+## Best Practices
+
+1. **Use Workload Identity in Production**: Most secure method, no credential management needed
+2. **Never Hard-code Secrets**: Always use environment variables or secret management systems
+3. **Use Least Privilege**: Grant only Reader role unless write access is needed
+4. **Rotate Credentials**: If using service principals, rotate secrets regularly
+5. **Monitor Health Endpoint**: Integrate health checks into your orchestration system
+6. **Enable Tracing**: Use Honeycomb and Sentry for production observability
+
+## Next Steps
+
+- See [federated-credentials.md](./federated-credentials.md) for detailed authentication information
+- See [testing-federated-auth.md](./testing-federated-auth.md) for testing scenarios with external identities
+- Review [Azure RBAC documentation](https://learn.microsoft.com/en-us/azure/role-based-access-control/) for permission management
+
+## Support
+
+For issues or questions:
+1. Check logs with `--log=debug`
+2. Verify Azure permissions with Azure CLI
+3. Review the federated credentials documentation
+4. Check the health endpoint for status
+
diff --git a/sources/azure/integration-tests/compute-virtual-machine_test.go b/sources/azure/integration-tests/compute-virtual-machine_test.go
new file mode 100644
index 00000000..a570a51c
--- /dev/null
+++ b/sources/azure/integration-tests/compute-virtual-machine_test.go
@@ -0,0 +1,614 @@
+package integrationtests
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+ log "github.com/sirupsen/logrus"
+ "k8s.io/utils/ptr"
+
+ "github.com/overmindtech/cli/discovery"
+ "github.com/overmindtech/cli/sources"
+ "github.com/overmindtech/cli/sources/azure/clients"
+ "github.com/overmindtech/cli/sources/azure/manual"
+ azureshared "github.com/overmindtech/cli/sources/azure/shared"
+)
+
+const (
+	// Shared resource group and region used by all Overmind integration tests.
+	integrationTestResourceGroup = "overmind-integration-tests"
+	integrationTestLocation      = "westus2"
+	// Names of the resources this test creates and tears down.
+	integrationTestVMName     = "ovm-integ-test-vm"
+	integrationTestNICName    = "ovm-integ-test-nic"
+	integrationTestVNetName   = "ovm-integ-test-vnet"
+	integrationTestSubnetName = "default"
+)
+
+// TestComputeVirtualMachineIntegration is an end-to-end test of the
+// compute-virtual-machine adapter against a real Azure subscription. It provisions
+// the required infrastructure (resource group, VNet/subnet, NIC, VM), verifies the
+// adapter's Get, List and linked-item behaviour, then tears the resources down.
+// Skipped unless AZURE_SUBSCRIPTION_ID is set.
+func TestComputeVirtualMachineIntegration(t *testing.T) {
+	subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
+	if subscriptionID == "" {
+		t.Skip("AZURE_SUBSCRIPTION_ID environment variable not set")
+	}
+
+	// Initialize Azure credentials using DefaultAzureCredential
+	cred, err := azureshared.NewAzureCredential(t.Context())
+	if err != nil {
+		t.Fatalf("Failed to create Azure credential: %v", err)
+	}
+
+	// Create Azure SDK clients
+	vmClient, err := armcompute.NewVirtualMachinesClient(subscriptionID, cred, nil)
+	if err != nil {
+		t.Fatalf("Failed to create Virtual Machines client: %v", err)
+	}
+
+	rgClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil)
+	if err != nil {
+		t.Fatalf("Failed to create Resource Groups client: %v", err)
+	}
+
+	vnetClient, err := armnetwork.NewVirtualNetworksClient(subscriptionID, cred, nil)
+	if err != nil {
+		t.Fatalf("Failed to create Virtual Networks client: %v", err)
+	}
+
+	subnetClient, err := armnetwork.NewSubnetsClient(subscriptionID, cred, nil)
+	if err != nil {
+		t.Fatalf("Failed to create Subnets client: %v", err)
+	}
+
+	nicClient, err := armnetwork.NewInterfacesClient(subscriptionID, cred, nil)
+	if err != nil {
+		t.Fatalf("Failed to create Network Interfaces client: %v", err)
+	}
+
+	// NOTE(review): sibling subtests run even when an earlier one fails, so
+	// Teardown still executes (best-effort) after a failed Setup or Run.
+	t.Run("Setup", func(t *testing.T) {
+		ctx := t.Context()
+
+		// Create resource group if it doesn't exist
+		err := createResourceGroup(ctx, rgClient, integrationTestResourceGroup, integrationTestLocation)
+		if err != nil {
+			t.Fatalf("Failed to create resource group: %v", err)
+		}
+
+		// Create virtual network
+		err = createVirtualNetwork(ctx, vnetClient, integrationTestResourceGroup, integrationTestVNetName, integrationTestLocation)
+		if err != nil {
+			t.Fatalf("Failed to create virtual network: %v", err)
+		}
+
+		// Get subnet ID for NIC creation
+		subnetResp, err := subnetClient.Get(ctx, integrationTestResourceGroup, integrationTestVNetName, integrationTestSubnetName, nil)
+		if err != nil {
+			t.Fatalf("Failed to get subnet: %v", err)
+		}
+
+		// Create network interface
+		err = createNetworkInterface(ctx, nicClient, integrationTestResourceGroup, integrationTestNICName, integrationTestLocation, *subnetResp.ID)
+		if err != nil {
+			t.Fatalf("Failed to create network interface: %v", err)
+		}
+
+		// Get NIC ID for VM creation
+		nicResp, err := nicClient.Get(ctx, integrationTestResourceGroup, integrationTestNICName, nil)
+		if err != nil {
+			t.Fatalf("Failed to get network interface: %v", err)
+		}
+
+		// Create virtual machine
+		err = createVirtualMachine(ctx, vmClient, integrationTestResourceGroup, integrationTestVMName, integrationTestLocation, *nicResp.ID)
+		if err != nil {
+			t.Fatalf("Failed to create virtual machine: %v", err)
+		}
+
+		// Wait for VM to be fully available via the API
+		err = waitForVMAvailable(ctx, vmClient, integrationTestResourceGroup, integrationTestVMName)
+		if err != nil {
+			t.Fatalf("Failed waiting for VM to be available: %v", err)
+		}
+	})
+
+	t.Run("Run", func(t *testing.T) {
+		// Exercise the adapter's Get path and verify the unique attribute matches.
+		t.Run("GetVirtualMachine", func(t *testing.T) {
+			ctx := t.Context()
+
+			log.Printf("Retrieving virtual machine %s in subscription %s, resource group %s",
+				integrationTestVMName, subscriptionID, integrationTestResourceGroup)
+
+			vmWrapper := manual.NewComputeVirtualMachine(
+				clients.NewVirtualMachinesClient(vmClient),
+				subscriptionID,
+				integrationTestResourceGroup,
+			)
+			scope := vmWrapper.Scopes()[0]
+
+			vmAdapter := sources.WrapperToAdapter(vmWrapper)
+			sdpItem, qErr := vmAdapter.Get(ctx, scope, integrationTestVMName, true)
+			if qErr != nil {
+				t.Fatalf("Expected no error, got: %v", qErr)
+			}
+
+			if sdpItem == nil {
+				t.Fatalf("Expected sdpItem to be non-nil")
+			}
+
+			uniqueAttrKey := sdpItem.GetUniqueAttribute()
+			uniqueAttrValue, err := sdpItem.GetAttributes().Get(uniqueAttrKey)
+			if err != nil {
+				t.Fatalf("Failed to get unique attribute: %v", err)
+			}
+
+			if uniqueAttrValue != integrationTestVMName {
+				t.Fatalf("Expected unique attribute value to be %s, got %s", integrationTestVMName, uniqueAttrValue)
+			}
+
+			log.Printf("Successfully retrieved virtual machine %s", integrationTestVMName)
+		})
+
+		// Exercise the adapter's List path and confirm the test VM appears in it.
+		t.Run("ListVirtualMachines", func(t *testing.T) {
+			ctx := t.Context()
+
+			log.Printf("Listing virtual machines in subscription %s, resource group %s",
+				subscriptionID, integrationTestResourceGroup)
+
+			vmWrapper := manual.NewComputeVirtualMachine(
+				clients.NewVirtualMachinesClient(vmClient),
+				subscriptionID,
+				integrationTestResourceGroup,
+			)
+			scope := vmWrapper.Scopes()[0]
+
+			vmAdapter := sources.WrapperToAdapter(vmWrapper)
+
+			// Check if adapter supports listing
+			listable, ok := vmAdapter.(discovery.ListableAdapter)
+			if !ok {
+				t.Fatalf("Adapter does not support List operation")
+			}
+
+			sdpItems, err := listable.List(ctx, scope, true)
+			if err != nil {
+				t.Fatalf("Failed to list virtual machines: %v", err)
+			}
+
+			if len(sdpItems) < 1 {
+				t.Fatalf("Expected at least one virtual machine, got %d", len(sdpItems))
+			}
+
+			var found bool
+			for _, item := range sdpItems {
+				uniqueAttrKey := item.GetUniqueAttribute()
+				if v, err := item.GetAttributes().Get(uniqueAttrKey); err == nil && v == integrationTestVMName {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				t.Fatalf("Expected to find VM %s in the list of virtual machines", integrationTestVMName)
+			}
+
+			log.Printf("Found %d virtual machines in resource group %s", len(sdpItems), integrationTestResourceGroup)
+		})
+
+		// Verify the adapter emits linked-item queries for the OS disk and the NIC.
+		t.Run("VerifyLinkedItems", func(t *testing.T) {
+			ctx := t.Context()
+
+			log.Printf("Verifying linked items for virtual machine %s", integrationTestVMName)
+
+			vmWrapper := manual.NewComputeVirtualMachine(
+				clients.NewVirtualMachinesClient(vmClient),
+				subscriptionID,
+				integrationTestResourceGroup,
+			)
+			scope := vmWrapper.Scopes()[0]
+
+			vmAdapter := sources.WrapperToAdapter(vmWrapper)
+			sdpItem, qErr := vmAdapter.Get(ctx, scope, integrationTestVMName, true)
+			if qErr != nil {
+				t.Fatalf("Expected no error, got: %v", qErr)
+			}
+
+			// Verify that linked items exist (OS disk and NIC should be linked)
+			linkedQueries := sdpItem.GetLinkedItemQueries()
+			if len(linkedQueries) == 0 {
+				t.Fatalf("Expected linked item queries, but got none")
+			}
+
+			var hasDiskLink, hasNICLink bool
+			for _, liq := range linkedQueries {
+				switch liq.GetQuery().GetType() {
+				case azureshared.ComputeDisk.String():
+					hasDiskLink = true
+				case azureshared.NetworkNetworkInterface.String():
+					hasNICLink = true
+				}
+			}
+
+			if !hasDiskLink {
+				t.Error("Expected linked query to OS disk, but didn't find one")
+			}
+
+			if !hasNICLink {
+				t.Error("Expected linked query to network interface, but didn't find one")
+			}
+
+			log.Printf("Verified %d linked item queries for VM %s", len(linkedQueries), integrationTestVMName)
+		})
+	})
+
+	t.Run("Teardown", func(t *testing.T) {
+		ctx := t.Context()
+
+		// Delete VM first (it must be deleted before NIC can be deleted)
+		err := deleteVirtualMachine(ctx, vmClient, integrationTestResourceGroup, integrationTestVMName)
+		if err != nil {
+			t.Fatalf("Failed to delete virtual machine: %v", err)
+		}
+
+		// Delete NIC
+		err = deleteNetworkInterface(ctx, nicClient, integrationTestResourceGroup, integrationTestNICName)
+		if err != nil {
+			t.Fatalf("Failed to delete network interface: %v", err)
+		}
+
+		// Delete VNet (this also deletes the subnet)
+		err = deleteVirtualNetwork(ctx, vnetClient, integrationTestResourceGroup, integrationTestVNetName)
+		if err != nil {
+			t.Fatalf("Failed to delete virtual network: %v", err)
+		}
+
+		// Optionally delete the resource group
+		// Note: We keep the resource group for faster subsequent test runs
+		// Uncomment the following if you want to clean up completely:
+		// err = deleteResourceGroup(ctx, rgClient, integrationTestResourceGroup)
+		// if err != nil {
+		// 	t.Fatalf("Failed to delete resource group: %v", err)
+		// }
+	})
+}
+
+// createResourceGroup creates an Azure resource group if it doesn't already exist (idempotent)
+func createResourceGroup(ctx context.Context, client *armresources.ResourceGroupsClient, resourceGroupName, location string) error {
+ // Check if resource group already exists
+ _, err := client.Get(ctx, resourceGroupName, nil)
+ if err == nil {
+ log.Printf("Resource group %s already exists, skipping creation", resourceGroupName)
+ return nil
+ }
+
+ // Create the resource group
+ _, err = client.CreateOrUpdate(ctx, resourceGroupName, armresources.ResourceGroup{
+ Location: ptr.To(location),
+ Tags: map[string]*string{
+ "purpose": ptr.To("overmind-integration-tests"),
+ "managed": ptr.To("true"),
+ },
+ }, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create resource group: %w", err)
+ }
+
+ log.Printf("Resource group %s created successfully in location %s", resourceGroupName, location)
+ return nil
+}
+
+// createVirtualNetwork creates an Azure virtual network with a default subnet (idempotent).
+// The VNet uses 10.0.0.0/16 with a single 10.0.0.0/24 subnet named integrationTestSubnetName.
+func createVirtualNetwork(ctx context.Context, client *armnetwork.VirtualNetworksClient, resourceGroupName, vnetName, location string) error {
+	// Check if VNet already exists
+	// NOTE(review): any Get error (not just NotFound) falls through to creation;
+	// BeginCreateOrUpdate is idempotent so this is safe, but real auth/network
+	// failures surface as a create error rather than a lookup error.
+	_, err := client.Get(ctx, resourceGroupName, vnetName, nil)
+	if err == nil {
+		log.Printf("Virtual network %s already exists, skipping creation", vnetName)
+		return nil
+	}
+
+	// Create the VNet
+	poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{
+		Location: ptr.To(location),
+		Properties: &armnetwork.VirtualNetworkPropertiesFormat{
+			AddressSpace: &armnetwork.AddressSpace{
+				AddressPrefixes: []*string{ptr.To("10.0.0.0/16")},
+			},
+			Subnets: []*armnetwork.Subnet{
+				{
+					Name: ptr.To(integrationTestSubnetName),
+					Properties: &armnetwork.SubnetPropertiesFormat{
+						AddressPrefix: ptr.To("10.0.0.0/24"),
+					},
+				},
+			},
+		},
+		Tags: map[string]*string{
+			"purpose": ptr.To("overmind-integration-tests"),
+		},
+	}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to begin creating virtual network: %w", err)
+	}
+
+	// Block until the long-running operation reaches a terminal state.
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create virtual network: %w", err)
+	}
+
+	log.Printf("Virtual network %s created successfully", vnetName)
+	return nil
+}
+
+// createNetworkInterface creates an Azure network interface (idempotent).
+// The NIC gets a single dynamically-allocated private IP configuration in the
+// supplied subnet; no public IP is attached.
+func createNetworkInterface(ctx context.Context, client *armnetwork.InterfacesClient, resourceGroupName, nicName, location, subnetID string) error {
+	// Check if NIC already exists
+	// NOTE(review): as with the other helpers, any Get error falls through to
+	// creation; BeginCreateOrUpdate is idempotent so this remains safe.
+	_, err := client.Get(ctx, resourceGroupName, nicName, nil)
+	if err == nil {
+		log.Printf("Network interface %s already exists, skipping creation", nicName)
+		return nil
+	}
+
+	// Create the NIC
+	poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, nicName, armnetwork.Interface{
+		Location: ptr.To(location),
+		Properties: &armnetwork.InterfacePropertiesFormat{
+			IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
+				{
+					Name: ptr.To("ipconfig1"),
+					Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
+						Subnet: &armnetwork.Subnet{
+							ID: ptr.To(subnetID),
+						},
+						PrivateIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodDynamic),
+					},
+				},
+			},
+		},
+		Tags: map[string]*string{
+			"purpose": ptr.To("overmind-integration-tests"),
+		},
+	}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to begin creating network interface: %w", err)
+	}
+
+	// Block until the long-running operation reaches a terminal state.
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create network interface: %w", err)
+	}
+
+	log.Printf("Network interface %s created successfully", nicName)
+	return nil
+}
+
+// createVirtualMachine creates an Azure virtual machine (idempotent).
+// If the VM already exists in the Succeeded state it is left alone; if it exists in
+// any other state the CreateOrUpdate below is re-issued and its poller waits for a
+// terminal state.
+func createVirtualMachine(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID string) error {
+	// Check if VM already exists
+	existingVM, err := client.Get(ctx, resourceGroupName, vmName, nil)
+	if err == nil {
+		// VM exists, check its state
+		if existingVM.Properties != nil && existingVM.Properties.ProvisioningState != nil {
+			state := *existingVM.Properties.ProvisioningState
+			if state == "Succeeded" {
+				log.Printf("Virtual machine %s already exists with state %s, skipping creation", vmName, state)
+				return nil
+			}
+			// Not in a terminal good state: fall through and re-issue CreateOrUpdate,
+			// whose poller below waits for completion.
+			log.Printf("Virtual machine %s exists but in state %s, re-issuing create and polling", vmName, state)
+		} else {
+			log.Printf("Virtual machine %s already exists, skipping creation", vmName)
+			return nil
+		}
+	}
+
+	// Create the VM
+	poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmName, armcompute.VirtualMachine{
+		Location: ptr.To(location),
+		Properties: &armcompute.VirtualMachineProperties{
+			HardwareProfile: &armcompute.HardwareProfile{
+				// Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2
+				VMSize: ptr.To(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")),
+			},
+			StorageProfile: &armcompute.StorageProfile{
+				ImageReference: &armcompute.ImageReference{
+					Publisher: ptr.To("Canonical"),
+					Offer:     ptr.To("0001-com-ubuntu-server-jammy"),
+					SKU:       ptr.To("22_04-lts-arm64"), // ARM64 image for ARM-based VM
+					Version:   ptr.To("latest"),
+				},
+				OSDisk: &armcompute.OSDisk{
+					Name:         ptr.To(fmt.Sprintf("%s-osdisk", vmName)),
+					CreateOption: ptr.To(armcompute.DiskCreateOptionTypesFromImage),
+					ManagedDisk: &armcompute.ManagedDiskParameters{
+						StorageAccountType: ptr.To(armcompute.StorageAccountTypesStandardLRS),
+					},
+					// Delete the OS disk together with the VM so teardown is complete.
+					DeleteOption: ptr.To(armcompute.DiskDeleteOptionTypesDelete),
+				},
+			},
+			OSProfile: &armcompute.OSProfile{
+				ComputerName:  ptr.To(vmName),
+				AdminUsername: ptr.To("azureuser"),
+				// Password authentication keeps the test simple (no SSH key plumbing).
+				// The value is overridable via OVM_INTEG_VM_PASSWORD; the default is a
+				// throwaway that only protects a short-lived, private-IP-only test VM.
+				AdminPassword: ptr.To(integrationTestVMPassword()),
+				LinuxConfiguration: &armcompute.LinuxConfiguration{
+					DisablePasswordAuthentication: ptr.To(false),
+				},
+			},
+			NetworkProfile: &armcompute.NetworkProfile{
+				NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
+					{
+						ID: ptr.To(nicID),
+						Properties: &armcompute.NetworkInterfaceReferenceProperties{
+							Primary: ptr.To(true),
+						},
+					},
+				},
+			},
+		},
+		Tags: map[string]*string{
+			"purpose": ptr.To("overmind-integration-tests"),
+			"test":    ptr.To("compute-virtual-machine"),
+		},
+	}, nil)
+	if err != nil {
+		// Check if VM already exists (conflict)
+		var respErr *azcore.ResponseError
+		if errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict {
+			log.Printf("Virtual machine %s already exists (conflict), skipping creation", vmName)
+			return nil
+		}
+		return fmt.Errorf("failed to begin creating virtual machine: %w", err)
+	}
+
+	resp, err := poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create virtual machine: %w", err)
+	}
+
+	// Verify the VM was created successfully
+	if resp.Properties == nil || resp.Properties.ProvisioningState == nil {
+		return fmt.Errorf("VM created but provisioning state is unknown")
+	}
+
+	provisioningState := *resp.Properties.ProvisioningState
+	if provisioningState != "Succeeded" {
+		return fmt.Errorf("VM provisioning state is %s, expected Succeeded", provisioningState)
+	}
+
+	log.Printf("Virtual machine %s created successfully with provisioning state: %s", vmName, provisioningState)
+	return nil
+}
+
+// integrationTestVMPassword returns the admin password for the test VM. It can be
+// overridden with the OVM_INTEG_VM_PASSWORD environment variable; the default
+// matches the previous hard-coded value, so existing setups are unaffected.
+func integrationTestVMPassword() string {
+	if pw := os.Getenv("OVM_INTEG_VM_PASSWORD"); pw != "" {
+		return pw
+	}
+	return "OvmIntegTest2024!"
+}
+
+// waitForVMAvailable polls until the VM is available via the Get API.
+// This is needed because even after creation succeeds, there can be a delay before
+// the VM is queryable. The wait is context-aware: cancelling ctx aborts promptly
+// instead of blocking inside a bare time.Sleep.
+func waitForVMAvailable(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName string) error {
+	maxAttempts := 20
+	pollInterval := 10 * time.Second
+
+	// wait pauses for pollInterval, or returns ctx.Err() early on cancellation.
+	wait := func() error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(pollInterval):
+			return nil
+		}
+	}
+
+	log.Printf("Waiting for VM %s to be available via API...", vmName)
+
+	for attempt := 1; attempt <= maxAttempts; attempt++ {
+		resp, err := client.Get(ctx, resourceGroupName, vmName, nil)
+		if err != nil {
+			var respErr *azcore.ResponseError
+			if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
+				log.Printf("VM %s not yet available (attempt %d/%d), waiting %v...", vmName, attempt, maxAttempts, pollInterval)
+				if werr := wait(); werr != nil {
+					return fmt.Errorf("cancelled while waiting for VM %s: %w", vmName, werr)
+				}
+				continue
+			}
+			return fmt.Errorf("error checking VM availability: %w", err)
+		}
+
+		// Check provisioning state
+		if resp.Properties != nil && resp.Properties.ProvisioningState != nil {
+			state := *resp.Properties.ProvisioningState
+			if state == "Succeeded" {
+				log.Printf("VM %s is available with provisioning state: %s", vmName, state)
+				return nil
+			}
+			if state == "Failed" {
+				return fmt.Errorf("VM provisioning failed with state: %s", state)
+			}
+			// Still provisioning, wait and retry
+			log.Printf("VM %s provisioning state: %s (attempt %d/%d), waiting...", vmName, state, attempt, maxAttempts)
+			if werr := wait(); werr != nil {
+				return fmt.Errorf("cancelled while waiting for VM %s: %w", vmName, werr)
+			}
+			continue
+		}
+
+		// VM exists but no provisioning state - consider it available
+		log.Printf("VM %s is available", vmName)
+		return nil
+	}
+
+	return fmt.Errorf("timeout waiting for VM %s to be available after %d attempts", vmName, maxAttempts)
+}
+
+// deleteVirtualMachine deletes an Azure virtual machine.
+// The OS disk is removed along with it because the VM was created with
+// DiskDeleteOptionTypesDelete. A missing VM is treated as success.
+func deleteVirtualMachine(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName string) error {
+	// Use forceDeletion to speed up cleanup
+	poller, err := client.BeginDelete(ctx, resourceGroupName, vmName, &armcompute.VirtualMachinesClientBeginDeleteOptions{
+		ForceDeletion: ptr.To(true),
+	})
+	if err != nil {
+		var respErr *azcore.ResponseError
+		if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
+			// Already gone (e.g. a previous teardown got this far) - nothing to do.
+			log.Printf("Virtual machine %s not found, skipping deletion", vmName)
+			return nil
+		}
+		return fmt.Errorf("failed to begin deleting virtual machine: %w", err)
+	}
+
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to delete virtual machine: %w", err)
+	}
+
+	log.Printf("Virtual machine %s deleted successfully", vmName)
+
+	// Wait a bit to allow Azure to release associated resources
+	// (the NIC stays reserved for a while after VM deletion; deleteNetworkInterface
+	// additionally retries for that case).
+	log.Printf("Waiting 30 seconds for Azure to release associated resources...")
+	time.Sleep(30 * time.Second)
+
+	return nil
+}
+
+// deleteNetworkInterface deletes an Azure network interface with retry logic.
+// Azure reserves NICs for 180 seconds after VM deletion, so we may need to retry.
+// A missing NIC is treated as success.
+func deleteNetworkInterface(ctx context.Context, client *armnetwork.InterfacesClient, resourceGroupName, nicName string) error {
+	// 4 attempts x 60s covers the documented ~180s reservation window with margin.
+	maxRetries := 4
+	retryDelay := 60 * time.Second
+
+	for attempt := 1; attempt <= maxRetries; attempt++ {
+		poller, err := client.BeginDelete(ctx, resourceGroupName, nicName, nil)
+		if err != nil {
+			var respErr *azcore.ResponseError
+			if errors.As(err, &respErr) {
+				if respErr.StatusCode == http.StatusNotFound {
+					log.Printf("Network interface %s not found, skipping deletion", nicName)
+					return nil
+				}
+				// Handle NicReservedForAnotherVm error - retry after delay
+				if respErr.ErrorCode == "NicReservedForAnotherVm" && attempt < maxRetries {
+					log.Printf("NIC %s is reserved, waiting %v before retry (attempt %d/%d)", nicName, retryDelay, attempt, maxRetries)
+					time.Sleep(retryDelay)
+					continue
+				}
+			}
+			return fmt.Errorf("failed to begin deleting network interface: %w", err)
+		}
+
+		_, err = poller.PollUntilDone(ctx, nil)
+		if err != nil {
+			return fmt.Errorf("failed to delete network interface: %w", err)
+		}
+
+		log.Printf("Network interface %s deleted successfully", nicName)
+		return nil
+	}
+
+	// Only reachable when every attempt hit the reservation error.
+	return fmt.Errorf("failed to delete network interface %s after %d attempts", nicName, maxRetries)
+}
+
+// deleteVirtualNetwork deletes an Azure virtual network.
+// Deleting the VNet also removes its subnets; a missing VNet is treated as success.
+func deleteVirtualNetwork(ctx context.Context, client *armnetwork.VirtualNetworksClient, resourceGroupName, vnetName string) error {
+	poller, err := client.BeginDelete(ctx, resourceGroupName, vnetName, nil)
+	if err != nil {
+		var respErr *azcore.ResponseError
+		if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
+			log.Printf("Virtual network %s not found, skipping deletion", vnetName)
+			return nil
+		}
+		return fmt.Errorf("failed to begin deleting virtual network: %w", err)
+	}
+
+	// Block until the long-running delete reaches a terminal state.
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to delete virtual network: %w", err)
+	}
+
+	log.Printf("Virtual network %s deleted successfully", vnetName)
+	return nil
+}
diff --git a/sources/azure/manual/adapters.go b/sources/azure/manual/adapters.go
index 621daf8d..28beceb1 100644
--- a/sources/azure/manual/adapters.go
+++ b/sources/azure/manual/adapters.go
@@ -2,70 +2,97 @@ package manual
import (
"context"
+ "fmt"
- "golang.org/x/oauth2"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+ log "github.com/sirupsen/logrus"
"github.com/overmindtech/cli/discovery"
- // The following imports will be needed when implementing adapter registration:
- // "github.com/overmindtech/cli/sources"
- // "github.com/overmindtech/cli/sources/azure/clients"
+ "github.com/overmindtech/cli/sources"
+ "github.com/overmindtech/cli/sources/azure/clients"
)
// Adapters returns a slice of discovery.Adapter instances for Azure Source.
// It initializes Azure clients if initAzureClients is true, and creates adapters for the specified subscription ID and regions.
// Otherwise, it uses nil clients, which is useful for enumerating adapters for documentation purposes.
-// TODO: fix function signature to use subscriptionID instead of projectID and remove zones/tokenSource parameters in https://linear.app/overmind/issue/ENG-1830/authenticate-to-azure-using-federated-credentials
-func Adapters(ctx context.Context, subscriptionID string, regions []string, zones []string, tokenSource *oauth2.TokenSource, initAzureClients bool) ([]discovery.Adapter, error) {
- // TODO: instantiate Azure clients using federated credentials in https://linear.app/overmind/issue/ENG-1830/authenticate-to-azure-using-federated-credentials
-
+func Adapters(ctx context.Context, subscriptionID string, regions []string, cred *azidentity.DefaultAzureCredential, initAzureClients bool) ([]discovery.Adapter, error) {
var adapters []discovery.Adapter
if initAzureClients {
- // TODO: Initialize Azure SDK clients using federated credentials in https://linear.app/overmind/issue/ENG-1830/authenticate-to-azure-using-federated-credentials
- // Example:
- // cred, err := azidentity.NewDefaultAzureCredential(nil)
- // if err != nil {
- // return nil, fmt.Errorf("failed to create Azure credential: %w", err)
- // }
- // vmClient, err := armcompute.NewVirtualMachinesClient(subscriptionID, cred, nil)
- // if err != nil {
- // return nil, fmt.Errorf("failed to create virtual machines client: %w", err)
- // }
-
- // TODO: Discover resource groups in the subscription (requires ENG-1830 for authentication)
- // For now, this is a placeholder showing the pattern:
- // resourceGroups := []string{"rg-example-1", "rg-example-2"} // Would be discovered via Azure Resource Manager API
- // for _, resourceGroup := range resourceGroups {
- // adapters = append(adapters,
- // sources.WrapperToAdapter(NewComputeVirtualMachine(
- // clients.NewVirtualMachinesClient(vmClient),
- // subscriptionID,
- // resourceGroup,
- // )),
- // )
- // }
- _ = subscriptionID // Suppress unused parameter warning - will be used when ENG-1830 is implemented
+ if cred == nil {
+ return nil, fmt.Errorf("credentials are required when initAzureClients is true")
+ }
+
+ log.WithFields(log.Fields{
+ "ovm.source.subscription_id": subscriptionID,
+ "ovm.source.regions": regions,
+ }).Info("Initializing Azure clients and discovering resource groups")
+
+ // Create resource groups client to discover all resource groups in the subscription
+ rgClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create resource groups client: %w", err)
+ }
+
+ // Discover resource groups in the subscription
+ resourceGroups := make([]string, 0)
+ pager := rgClient.NewListPager(nil)
+ for pager.More() {
+ page, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list resource groups: %w", err)
+ }
+
+ for _, rg := range page.Value {
+ if rg.Name != nil {
+ resourceGroups = append(resourceGroups, *rg.Name)
+ }
+ }
+ }
+
+ log.WithFields(log.Fields{
+ "ovm.source.subscription_id": subscriptionID,
+ "ovm.source.resource_group_count": len(resourceGroups),
+ }).Info("Discovered resource groups")
+
+ // Initialize Azure SDK clients
+ vmClient, err := armcompute.NewVirtualMachinesClient(subscriptionID, cred, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create virtual machines client: %w", err)
+ }
+
+ // Create adapters for each resource group
+ for _, resourceGroup := range resourceGroups {
+ // Add Compute Virtual Machine adapter for this resource group
+ adapters = append(adapters,
+ sources.WrapperToAdapter(NewComputeVirtualMachine(
+ clients.NewVirtualMachinesClient(vmClient),
+ subscriptionID,
+ resourceGroup,
+ )),
+ )
+ }
+
+ log.WithFields(log.Fields{
+ "ovm.source.subscription_id": subscriptionID,
+ "ovm.source.adapter_count": len(adapters),
+ }).Info("Initialized Azure adapters")
+
} else {
- // Example: Compute Virtual Machine adapter registration pattern
- // This shows how adapters will be registered once ENG-1830 implements client initialization.
- // The actual registration requires:
- // 1. Azure SDK client initialized with federated credentials (ENG-1830)
- // 2. Resource group discovery (can be done via Azure Resource Manager API)
- //
- // Example pattern (commented until ENG-1830):
- // import (
- // "github.com/overmindtech/cli/sources"
- // "github.com/overmindtech/cli/sources/azure/clients"
- // )
- // vmClient := clients.NewVirtualMachinesClient(armcomputeClient) // Requires ENG-1830 for armcomputeClient
- // adapters = append(adapters,
- // sources.WrapperToAdapter(NewComputeVirtualMachine(
- // vmClient,
- // subscriptionID,
- // "example-resource-group", // Would be discovered from subscription
- // )),
- // )
- _ = subscriptionID // Suppress unused parameter warning - will be used when ENG-1830 is implemented
+ // For metadata registration only - no actual clients needed
+ // This is used to enumerate available adapter types for documentation
+ // Create placeholder adapters with nil clients for metadata registration
+ adapters = append(adapters,
+ sources.WrapperToAdapter(NewComputeVirtualMachine(
+ nil, // nil client is okay for metadata registration
+ subscriptionID,
+ "placeholder-resource-group",
+ )),
+ )
+
+ _ = regions
}
return adapters, nil
diff --git a/sources/azure/proc/proc.go b/sources/azure/proc/proc.go
index 62e4392e..80449913 100644
--- a/sources/azure/proc/proc.go
+++ b/sources/azure/proc/proc.go
@@ -6,6 +6,8 @@ import (
"fmt"
"sync"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
@@ -16,8 +18,7 @@ import (
// "github.com/overmindtech/cli/sources/azure/dynamic"
// _ "github.com/overmindtech/cli/sources/azure/dynamic/adapters" // Import all adapters to register them
"github.com/overmindtech/cli/sources/azure/manual"
- // TODO: Uncomment when azureshared.Linker is implemented
- // azureshared "github.com/overmindtech/cli/sources/azure/shared"
+ azureshared "github.com/overmindtech/cli/sources/azure/shared"
)
// Metadata contains the metadata for the Azure source
@@ -43,6 +44,7 @@ func init() {
"tenant",
"client",
[]string{"region"},
+ nil, // No credentials needed for metadata registration
nil,
false,
)
@@ -113,20 +115,23 @@ func Initialize(ctx context.Context, ec *discovery.EngineConfig, cfg *AzureConfi
return fmt.Errorf("Azure source must specify subscription ID")
}
- // TODO: Implement linker when Azure dynamic adapters are available in https://linear.app/overmind/issue/ENG-1830/authenticate-to-azure-using-federated-credentials
+ // Initialize Azure credentials
+ cred, err := azureshared.NewAzureCredential(ctx)
+ if err != nil {
+ return fmt.Errorf("error creating Azure credentials: %w", err)
+ }
+
+ // TODO: Implement linker when Azure dynamic adapters are available
var linker interface{} = nil
- discoveryAdapters, err := adapters(ctx, cfg.SubscriptionID, cfg.TenantID, cfg.ClientID, cfg.Regions, linker, true)
+ discoveryAdapters, err := adapters(ctx, cfg.SubscriptionID, cfg.TenantID, cfg.ClientID, cfg.Regions, cred, linker, true)
if err != nil {
return fmt.Errorf("error creating discovery adapters: %w", err)
}
- // TODO: Add permission check for Azure subscription access in https://linear.app/overmind/issue/ENG-1830/authenticate-to-azure-using-federated-credentials
- // This would verify that the credentials can access the subscription
+ // Set up permission check that verifies subscription access
permissionCheck = func() error {
- // For now, we'll skip the permission check until we have a subscription adapter
- // In the future, we can add a check similar to GCP's project check
- return nil
+ return checkSubscriptionAccess(ctx, cfg.SubscriptionID, cred)
}
err = permissionCheck()
@@ -201,19 +206,18 @@ func adapters(
tenantID string,
clientID string,
regions []string,
- linker interface{}, // TODO: Use *azureshared.Linker when azureshared package is fully implemented in https://linear.app/overmind/issue/ENG-1830/authenticate-to-azure-using-federated-credentials
+ cred *azidentity.DefaultAzureCredential,
+ linker interface{}, // TODO: Use *azureshared.Linker when azureshared package is fully implemented
initAzureClients bool,
) ([]discovery.Adapter, error) {
discoveryAdapters := make([]discovery.Adapter, 0)
// Add manual adapters
- // Note: manual.Adapters currently uses projectID parameter name but accepts subscriptionID
manualAdapters, err := manual.Adapters(
ctx,
- subscriptionID, // passed as projectID parameter (will be updated in manual.Adapters)
+ subscriptionID,
regions,
- nil, // zones not used in Azure
- nil, // tokenSource not used with federated credentials
+ cred,
initAzureClients,
)
if err != nil {
@@ -227,11 +231,6 @@ func adapters(
discoveryAdapters = append(discoveryAdapters, manualAdapters...)
- // Azure SDK handles authentication automatically via federated credentials
- // when running in Kubernetes/EKS with workload identity
- // For local development, Azure SDK will use Azure CLI or environment variables
- _ = initAzureClients // TODO: Use this when implementing Azure client initialization in https://linear.app/overmind/issue/ENG-1830/authenticate-to-azure-using-federated-credentials
-
// TODO: Add dynamic adapters when Azure dynamic adapter framework is implemented
// dynamicAdapters, err := dynamic.Adapters(
// subscriptionID,
@@ -247,5 +246,34 @@ func adapters(
// }
// discoveryAdapters = append(discoveryAdapters, dynamicAdapters...)
+	_ = tenantID // currently unused (discarded); kept in the signature for future metadata/logging
+	_ = clientID // currently unused (discarded); kept in the signature for future metadata/logging
+
return discoveryAdapters, nil
}
+
+// checkSubscriptionAccess verifies that the credentials have access to the specified subscription
+func checkSubscriptionAccess(ctx context.Context, subscriptionID string, cred *azidentity.DefaultAzureCredential) error {
+ // Create a resource groups client to test subscription access
+ client, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create resource groups client: %w", err)
+ }
+
+ // Try to list resource groups to verify access
+	// NewListPager is lazy: the pager's More() reports true before the first
+	// page has been fetched, so a `!pager.More()` guard here could never
+	// fire. Fetch the first page directly instead; an authentication or
+	// authorization failure surfaces as an error from NextPage, while an
+	// empty subscription simply returns an empty (successful) page.
+	pager := client.NewListPager(nil)
+
+	// Try to get the first page to verify we have access
+	_, err = pager.NextPage(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to verify subscription access: %w", err)
+	}
+
+ log.WithField("ovm.source.subscription_id", subscriptionID).Info("Successfully verified subscription access")
+ return nil
+}
diff --git a/sources/azure/proc/proc_test.go b/sources/azure/proc/proc_test.go
index 0c30051c..ad0620a6 100644
--- a/sources/azure/proc/proc_test.go
+++ b/sources/azure/proc/proc_test.go
@@ -17,6 +17,7 @@ func Test_adapters(t *testing.T) {
"tenant",
"client",
[]string{"region"},
+ nil, // No credentials needed for metadata registration
nil,
false,
)
diff --git a/sources/azure/shared/credentials.go b/sources/azure/shared/credentials.go
new file mode 100644
index 00000000..af28c7ab
--- /dev/null
+++ b/sources/azure/shared/credentials.go
@@ -0,0 +1,36 @@
+package shared
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	log "github.com/sirupsen/logrus"
+)
+
+// NewAzureCredential creates a new DefaultAzureCredential which automatically handles
+// multiple authentication methods in the following order:
+// 1. Environment variables (AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_FEDERATED_TOKEN_FILE, etc.)
+// 2. Workload Identity (Kubernetes with OIDC federation)
+// 3. Managed Identity (when running in Azure)
+// 4. Azure CLI (for local development)
+//
+// The ctx parameter is currently unused (NewDefaultAzureCredential performs no
+// I/O); it is kept so the signature stays stable if a future credential setup
+// needs a context.
+//
+// Reference: https://learn.microsoft.com/en-us/azure/developer/go/sdk/authentication/credential-chains
+func NewAzureCredential(ctx context.Context) (*azidentity.DefaultAzureCredential, error) {
+	log.Debug("Initializing Azure credentials using DefaultAzureCredential")
+
+	cred, err := azidentity.NewDefaultAzureCredential(nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create Azure credential: %w", err)
+	}
+
+	log.WithFields(log.Fields{
+		"ovm.auth.method": "default-azure-credential",
+		"ovm.auth.type":   "federated-or-environment",
+	}).Info("Successfully initialized Azure credentials")
+
+	return cred, nil
+}
diff --git a/sources/gcp/dynamic/shared.go b/sources/gcp/dynamic/shared.go
index 0fe3aaca..f80c2963 100644
--- a/sources/gcp/dynamic/shared.go
+++ b/sources/gcp/dynamic/shared.go
@@ -7,6 +7,7 @@ import (
"io"
"net/http"
"net/url"
+ "slices"
"strings"
log "github.com/sirupsen/logrus"
@@ -291,16 +292,19 @@ func externalCallMulti(ctx context.Context, itemsSelector string, httpCli *http.
}
func potentialLinksFromBlasts(itemType shared.ItemType, blasts map[shared.ItemType]map[string]*gcpshared.Impact) []string {
- var potentialLinks []string
var potentialLinksMap = make(map[string]bool)
for _, impact := range blasts[itemType] {
potentialLinksMap[impact.ToSDPItemType.String()] = true
}
+ potentialLinks := make([]string, 0, len(potentialLinksMap))
for it := range potentialLinksMap {
potentialLinks = append(potentialLinks, it)
}
+ // Sort to ensure deterministic ordering
+ slices.Sort(potentialLinks)
+
return potentialLinks
}
diff --git a/tracing/main.go b/tracing/main.go
index 40740e3e..60ae313a 100644
--- a/tracing/main.go
+++ b/tracing/main.go
@@ -211,6 +211,14 @@ func InitTracerWithUpstreams(component, honeycombApiKey, sentryDSN string, opts
otlptracehttp.WithEndpoint("api.honeycomb.io"),
otlptracehttp.WithHeaders(map[string]string{"x-honeycomb-team": honeycombApiKey}),
)
+ } else {
+ // If no Honeycomb API key is provided, use the hardcoded OTLP collector
+ // endpoint, which is provided by the otel-collector service in the otel
+	// namespace. Since this is a node-local service, it does not use TLS.
+ opts = append(opts,
+ otlptracehttp.WithEndpoint("otelcol-node-opentelemetry-collector.otel.svc.cluster.local:4318"),
+ otlptracehttp.WithInsecure(),
+ )
}
return InitTracer(component, opts...)