diff --git a/cmd/api/src/analysis/ad/ad_integration_test.go b/cmd/api/src/analysis/ad/ad_integration_test.go index 7758de47614..426ae77e1e1 100644 --- a/cmd/api/src/analysis/ad/ad_integration_test.go +++ b/cmd/api/src/analysis/ad/ad_integration_test.go @@ -1141,9 +1141,9 @@ func TestSyncLAPSPassword(t *testing.T) { harness.SyncLAPSPasswordHarness.Setup(testContext) return nil }, func(harness integration.HarnessDetails, db graph.Database) { - if groupExpansions, err := adAnalysis.ExpandAllRDPLocalGroups(testContext.Context(), db); err != nil { + if localGroupData, err := adAnalysis.FetchLocalGroupData(testContext.Context(), db); err != nil { t.Fatalf("error expanding groups in integration test; %v", err) - } else if _, err := adAnalysis.PostSyncLAPSPassword(testContext.Context(), db, groupExpansions); err != nil { + } else if _, err := adAnalysis.PostSyncLAPSPassword(testContext.Context(), db, localGroupData); err != nil { t.Fatalf("error creating SyncLAPSPassword edges in integration test; %v", err) } else { db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { @@ -1172,9 +1172,9 @@ func TestDCSync(t *testing.T) { harness.DCSyncHarness.Setup(testContext) return nil }, func(harness integration.HarnessDetails, db graph.Database) { - if groupExpansions, err := adAnalysis.ExpandAllRDPLocalGroups(testContext.Context(), db); err != nil { + if localGroupData, err := adAnalysis.FetchLocalGroupData(testContext.Context(), db); err != nil { t.Fatalf("error expanding groups in integration test; %v", err) - } else if _, err := adAnalysis.PostDCSync(testContext.Context(), db, groupExpansions); err != nil { + } else if _, err := adAnalysis.PostDCSync(testContext.Context(), db, localGroupData); err != nil { t.Fatalf("error creating DCSync edges in integration test; %v", err) } else { db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { @@ -1204,9 +1204,9 @@ func TestOwnsWriteOwnerPriorCollectorVersions(t *testing.T) { // To verify in Neo4j: MATCH (n:Computer) MATCH (u:User) RETURN n, u return nil }, func(harness integration.HarnessDetails, db graph.Database) { - if groupExpansions, err := adAnalysis.ExpandAllRDPLocalGroups(testContext.Context(), db); err != nil { + if localGroupData, err := adAnalysis.FetchLocalGroupData(testContext.Context(), db); err != nil { t.Fatalf("error expanding groups in integration test; %v", err) - } else if _, err := adAnalysis.PostOwnsAndWriteOwner(testContext.Context(), db, groupExpansions); err != nil { + } else if _, err := adAnalysis.PostOwnsAndWriteOwner(testContext.Context(), db, localGroupData); err != nil { t.Fatalf("error creating Owns/WriteOwner edges in integration test; %v", err) } else { db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { @@ -1407,9 +1407,9 @@ func TestOwnsWriteOwner(t *testing.T) { // To verify in Neo4j: MATCH (n:Computer) MATCH (u:User) RETURN n, u return nil }, func(harness integration.HarnessDetails, db graph.Database) { - if groupExpansions, err := adAnalysis.ExpandAllRDPLocalGroups(testContext.Context(), db); err != nil { + if localGroupData, err := adAnalysis.FetchLocalGroupData(testContext.Context(), db); err != nil { t.Fatalf("error expanding groups in integration test; %v", err) - } else if _, err := adAnalysis.PostOwnsAndWriteOwner(testContext.Context(), db, groupExpansions); err != nil { + } else if _, err := adAnalysis.PostOwnsAndWriteOwner(testContext.Context(), db, localGroupData); err != nil { t.Fatalf("error creating Owns/WriteOwner edges in integration test; 
%v", err) } else { db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { diff --git a/cmd/api/src/analysis/ad/adcs_integration_test.go b/cmd/api/src/analysis/ad/adcs_integration_test.go index d8ed25ef510..a484ed278c0 100644 --- a/cmd/api/src/analysis/ad/adcs_integration_test.go +++ b/cmd/api/src/analysis/ad/adcs_integration_test.go @@ -24,7 +24,6 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis" ad2 "github.com/specterops/bloodhound/packages/go/analysis/ad" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema" "github.com/specterops/dawgs/ops" @@ -39,8 +38,8 @@ import ( "github.com/stretchr/testify/require" ) -func FetchADCSPrereqs(db graph.Database) (impact.PathAggregator, []*graph.Node, []*graph.Node, []*graph.Node, ad2.ADCSCache, error) { - if expansions, err := ad2.ExpandAllRDPLocalGroups(context.Background(), db); err != nil { +func FetchADCSPrereqs(db graph.Database) (*ad2.LocalGroupData, []*graph.Node, []*graph.Node, []*graph.Node, ad2.ADCSCache, error) { + if localGroupData, err := ad2.FetchLocalGroupData(context.Background(), db); err != nil { return nil, nil, nil, nil, ad2.ADCSCache{}, err } else { cache := ad2.NewADCSCache() @@ -50,7 +49,7 @@ func FetchADCSPrereqs(db graph.Database) (impact.PathAggregator, []*graph.Node, return nil, nil, nil, nil, ad2.ADCSCache{}, err } else { cache.BuildCache(context.Background(), db, enterpriseCertAuthorities, certTemplates) - return expansions, cache.GetEnterpriseCertAuthorities(), cache.GetCertTemplates(), cache.GetDomains(), cache, nil + return localGroupData, cache.GetEnterpriseCertAuthorities(), cache.GetCertTemplates(), cache.GetDomains(), cache, nil } } } @@ -64,7 +63,7 @@ func TestADCSESC1(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC1") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -79,7 +78,7 @@ func TestADCSESC1(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC1(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC1(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC1.String(), err) } return nil @@ -178,7 +177,7 @@ func TestADCSESC1(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC1 Authenticated Users") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -193,7 +192,7 @@ func TestADCSESC1(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC1(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != 
nil { + if err := ad2.PostADCSESC1(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC1.String(), err) } return nil @@ -628,7 +627,7 @@ func TestADCSESC3(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC3") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -643,7 +642,7 @@ func TestADCSESC3(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC3(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC3(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC3.String(), err) } return nil @@ -679,7 +678,7 @@ func TestADCSESC3(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC3") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -694,7 +693,7 @@ func TestADCSESC3(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC3(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC3(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC3.String(), err) } return nil @@ -742,7 +741,7 @@ func TestADCSESC3(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC3") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -757,7 +756,7 @@ func TestADCSESC3(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC3(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC3(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC3.String(), err) } return nil @@ -804,7 +803,7 @@ func TestADCSESC4(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC4 template 1") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, 
enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -819,7 +818,7 @@ func TestADCSESC4(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC4(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } return nil @@ -869,7 +868,7 @@ func TestADCSESC4(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC4 template 2") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -884,7 +883,7 @@ func TestADCSESC4(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC4(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } return nil @@ -939,7 +938,7 @@ func TestADCSESC4(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC4 template 3") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -954,7 +953,7 @@ func TestADCSESC4(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC4(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } return nil @@ -990,7 +989,7 @@ func TestADCSESC4(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC4 template 4") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1005,7 +1004,7 @@ func TestADCSESC4(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC4(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, 
innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } return nil @@ -1045,7 +1044,7 @@ func TestADCSESC4Composition(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC4 template 1") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1060,7 +1059,7 @@ func TestADCSESC4Composition(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC4(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } return nil @@ -1270,7 +1269,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1285,7 +1284,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1322,7 +1321,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1337,7 +1336,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1371,7 +1370,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, 
enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1386,7 +1385,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1421,7 +1420,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1436,7 +1435,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1471,7 +1470,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1486,7 +1485,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1519,7 +1518,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1534,7 +1533,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, 
innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1594,7 +1593,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1609,7 +1608,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1644,7 +1643,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1659,7 +1658,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1691,7 +1690,7 @@ func TestADCSESC9a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9A Authenticated Users") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1706,7 +1705,7 @@ func TestADCSESC9a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } return nil @@ -1745,7 +1744,7 @@ func TestADCSESC9b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, 
enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1760,7 +1759,7 @@ func TestADCSESC9b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } return nil @@ -1797,7 +1796,7 @@ func TestADCSESC9b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1812,7 +1811,7 @@ func TestADCSESC9b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } return nil @@ -1845,7 +1844,7 @@ func TestADCSESC9b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1860,7 +1859,7 @@ func TestADCSESC9b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } return nil @@ -1894,7 +1893,7 @@ func TestADCSESC9b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1909,7 +1908,7 @@ func TestADCSESC9b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, 
innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } return nil @@ -1943,7 +1942,7 @@ func TestADCSESC9b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -1958,7 +1957,7 @@ func TestADCSESC9b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } return nil @@ -2022,7 +2021,7 @@ func TestADCSESC9b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2037,7 +2036,7 @@ func TestADCSESC9b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } return nil @@ -2072,7 +2071,7 @@ func TestADCSESC9b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC9b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2087,7 +2086,7 @@ func TestADCSESC9b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC9b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } return nil @@ -2121,7 +2120,7 @@ func TestADCSESC6a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, 
cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2136,7 +2135,7 @@ func TestADCSESC6a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6a.String(), err) } return nil @@ -2170,7 +2169,7 @@ func TestADCSESC6a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2185,7 +2184,7 @@ func TestADCSESC6a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6a.String(), err) } return nil @@ -2218,7 +2217,7 @@ func TestADCSESC6a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2233,7 +2232,7 @@ func TestADCSESC6a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6a.String(), err) } return nil @@ -2320,7 +2319,7 @@ func TestADCSESC6a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2335,7 +2334,7 @@ func TestADCSESC6a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); 
err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6a.String(), err) } return nil @@ -2374,7 +2373,7 @@ func TestADCSESC6b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6b template 1") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2389,7 +2388,7 @@ func TestADCSESC6b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } return nil @@ -2478,7 +2477,7 @@ func TestADCSESC6b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6b eca") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2493,7 +2492,7 @@ func TestADCSESC6b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } return nil @@ -2527,7 +2526,7 @@ func TestADCSESC6b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6b principal edges") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2542,7 +2541,7 @@ func TestADCSESC6b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } return nil @@ -2576,7 +2575,7 @@ func TestADCSESC6b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := 
FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2591,7 +2590,7 @@ func TestADCSESC6b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } return nil @@ -2631,7 +2630,7 @@ func TestADCSESC6b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2646,7 +2645,7 @@ func TestADCSESC6b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } return nil @@ -2680,7 +2679,7 @@ func TestADCSESC6b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC6b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2695,7 +2694,7 @@ func TestADCSESC6b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC6b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } return nil @@ -2729,7 +2728,7 @@ func TestADCSESC10a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2744,7 +2743,7 @@ func TestADCSESC10a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != 
nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } return nil @@ -2782,7 +2781,7 @@ func TestADCSESC10a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2797,7 +2796,7 @@ func TestADCSESC10a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } return nil @@ -2832,7 +2831,7 @@ func TestADCSESC10a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2847,7 +2846,7 @@ func TestADCSESC10a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } return nil @@ -2883,7 +2882,7 @@ func TestADCSESC10a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2898,7 +2897,7 @@ func TestADCSESC10a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } return nil @@ -2963,7 +2962,7 @@ func TestADCSESC10a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := 
FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -2978,7 +2977,7 @@ func TestADCSESC10a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } return nil @@ -3012,7 +3011,7 @@ func TestADCSESC10a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3027,7 +3026,7 @@ func TestADCSESC10a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } return nil @@ -3061,7 +3060,7 @@ func TestADCSESC10a(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10a") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3076,7 +3075,7 @@ func TestADCSESC10a(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10a(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } return nil @@ -3110,7 +3109,7 @@ func TestADCSESC13(t *testing.T) { return nil }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC13") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3125,7 +3124,7 @@ func TestADCSESC13(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC13(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC13(ctx, tx, outC, localGroupData, innerEnterpriseCA, 
targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC13.String(), err) } else { return nil @@ -3177,7 +3176,7 @@ func TestADCSESC13(t *testing.T) { return nil }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC13") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3192,7 +3191,7 @@ func TestADCSESC13(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC13(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC13(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC13.String(), err) } else { return nil @@ -3249,7 +3248,7 @@ func TestADCSESC13(t *testing.T) { return nil }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC13") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3264,7 +3263,7 @@ func TestADCSESC13(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC13(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC13(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC13.String(), err) } else { return nil @@ -3343,7 +3342,7 @@ func TestADCSESC10b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3358,7 +3357,7 @@ func TestADCSESC10b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } return nil @@ -3394,7 +3393,7 @@ func TestADCSESC10b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, 
enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3409,7 +3408,7 @@ func TestADCSESC10b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } return nil @@ -3443,7 +3442,7 @@ func TestADCSESC10b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3458,7 +3457,7 @@ func TestADCSESC10b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } return nil @@ -3492,7 +3491,7 @@ func TestADCSESC10b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3507,7 +3506,7 @@ func TestADCSESC10b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } return nil @@ -3573,7 +3572,7 @@ func TestADCSESC10b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3588,7 +3587,7 @@ func TestADCSESC10b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10b(ctx, tx, 
outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } return nil @@ -3622,7 +3621,7 @@ func TestADCSESC10b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3637,7 +3636,7 @@ func TestADCSESC10b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } return nil @@ -3671,7 +3670,7 @@ func TestADCSESC10b(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "ADCS Post Process Test - ESC10b") - groupExpansions, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) + localGroupData, enterpriseCertAuthorities, _, domains, cache, err := FetchADCSPrereqs(db) require.Nil(t, err) for _, enterpriseCA := range enterpriseCertAuthorities { @@ -3686,7 +3685,7 @@ func TestADCSESC10b(t *testing.T) { } operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := ad2.PostADCSESC10b(ctx, tx, outC, groupExpansions, innerEnterpriseCA, targetDomains, cache); err != nil { + if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } return nil diff --git a/cmd/api/src/analysis/ad/ntlm_integration_test.go b/cmd/api/src/analysis/ad/ntlm_integration_test.go index 01cc8b2f8f1..3b72e7a9e3d 100644 --- a/cmd/api/src/analysis/ad/ntlm_integration_test.go +++ b/cmd/api/src/analysis/ad/ntlm_integration_test.go @@ -27,7 +27,6 @@ import ( "github.com/specterops/bloodhound/cmd/api/src/test/integration" "github.com/specterops/bloodhound/packages/go/analysis" ad2 "github.com/specterops/bloodhound/packages/go/analysis/ad" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/graphschema/common" @@ -47,9 +46,9 @@ func TestPostNTLMRelayADCS(t *testing.T) { return nil }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM Post Process Test - CoerceAndRelayNTLMToADCS") - expansions, _, _, _, err := fetchNTLMPrereqs(db) + localGroupData, _, _, _, err := fetchNTLMPrereqs(db) require.NoError(t, err) - ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, expansions) + ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, localGroupData) require.NoError(t, err) cache := ad2.NewADCSCache() @@ -94,9 +93,9 @@ func 
TestNTLMRelayToADCSComposition(t *testing.T) { return nil }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM Composition Test - CoerceAndRelayNTLMToADCS") - expansions, _, _, _, err := fetchNTLMPrereqs(db) + localGroupData, _, _, _, err := fetchNTLMPrereqs(db) require.NoError(t, err) - ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, expansions) + ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, localGroupData) require.NoError(t, err) cache := ad2.NewADCSCache() @@ -152,9 +151,9 @@ func TestPostNTLMRelaySMB(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM Post Process Test - CoerceAndRelayNTLMToSMB") - groupExpansions, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) + localGroupData, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) require.NoError(t, err) - ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, groupExpansions) + ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, localGroupData) require.NoError(t, err) err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -228,9 +227,9 @@ func TestPostNTLMRelaySMB(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM - CoerceAndRelayNTLMToSMB - Relay To Self") - groupExpansions, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) + localGroupData, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) require.NoError(t, err) - ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, groupExpansions) + ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, localGroupData) require.NoError(t, err) err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -280,9 +279,9 @@ func TestNTLMRelayToSMBComposition(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM Composition Test - CoerceAndRelayNTLMToSMB") - groupExpansions, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) + localGroupData, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) require.NoError(t, err) - ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, groupExpansions) + ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, localGroupData) require.NoError(t, err) err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -352,13 +351,13 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM Post Process Test - CoerceAndRelayNTLMToLDAP") - groupExpansions, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) + localGroupData, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) require.NoError(t, err) ldapSigningCache, err := ad2.FetchLDAPSigningCache(testContext.Context(), db) require.NoError(t, err) - protectedUsersCache, err :=
ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, groupExpansions) + protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, localGroupData) require.NoError(t, err) err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -432,13 +431,13 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM Post Process Test - CoerceAndRelayNTLMToLDAPS") - groupExpansions, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) + localGroupData, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) require.NoError(t, err) ldapSigningCache, err := ad2.FetchLDAPSigningCache(testContext.Context(), db) require.NoError(t, err) - protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, groupExpansions) + protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, localGroupData) require.NoError(t, err) err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -508,13 +507,13 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM Post Process Test - CoerceAndRelayNTLMToLDAPS - Self Relay") - groupExpansions, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) + localGroupData, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) require.NoError(t, err) ldapSigningCache, err := ad2.FetchLDAPSigningCache(testContext.Context(), db) require.NoError(t, err) - protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, groupExpansions) + protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, localGroupData) require.NoError(t, err) err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -563,13 +562,13 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { }, func(harness integration.HarnessDetails, db graph.Database) { operation := analysis.NewPostRelationshipOperation(context.Background(), db, "NTLM Post Process Test - CoerceAndRelayNTLMToLDAP - Self Relay") - groupExpansions, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) + localGroupData, computers, _, authenticatedUsers, err := fetchNTLMPrereqs(db) require.NoError(t, err) ldapSigningCache, err := ad2.FetchLDAPSigningCache(testContext.Context(), db) require.NoError(t, err) - protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, groupExpansions) + protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, localGroupData) require.NoError(t, err) err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { @@ -611,9 +610,9 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { }) } -func fetchNTLMPrereqs(db graph.Database) (expansions impact.PathAggregator, computers []*graph.Node, domains []*graph.Node, authenticatedUsers map[string]graph.ID, err error) { +func
fetchNTLMPrereqs(db graph.Database) (localGroupData *ad2.LocalGroupData, computers []*graph.Node, domains []*graph.Node, authenticatedUsers map[string]graph.ID, err error) { cache := make(map[string]graph.ID) - if expansions, err = ad2.ExpandAllRDPLocalGroups(context.Background(), db); err != nil { + if localGroupData, err = ad2.FetchLocalGroupData(context.Background(), db); err != nil { return nil, nil, nil, cache, err } else if computers, err = ad2.FetchNodesByKind(context.Background(), db, ad.Computer); err != nil { return nil, nil, nil, cache, err @@ -627,6 +626,6 @@ func fetchNTLMPrereqs(db graph.Database) (expansions impact.PathAggregator, comp } else if domains, err = ad2.FetchNodesByKind(context.Background(), db, ad.Domain); err != nil { return nil, nil, nil, cache, err } else { - return expansions, computers, domains, cache, nil + return localGroupData, computers, domains, cache, nil } } diff --git a/cmd/api/src/analysis/ad/post.go b/cmd/api/src/analysis/ad/post.go index 2b4bd7a41ce..f59d98332c3 100644 --- a/cmd/api/src/analysis/ad/post.go +++ b/cmd/api/src/analysis/ad/post.go @@ -28,36 +28,41 @@ import ( func Post(ctx context.Context, db graph.Database, adcsEnabled, citrixEnabled, ntlmEnabled bool, compositionCounter *analysis.CompositionCounter) (*analysis.AtomicPostProcessingStats, error) { aggregateStats := analysis.NewAtomicPostProcessingStats() - if stats, err := analysis.DeleteTransitEdges(ctx, db, graph.Kinds{ad.Entity, azure.Entity}, ad.PostProcessedRelationships()...); err != nil { + + if deleteTransitEdgesStats, err := analysis.DeleteTransitEdges(ctx, db, graph.Kinds{ad.Entity, azure.Entity}, ad.PostProcessedRelationships()...); err != nil { return &aggregateStats, err - } else if groupExpansions, err := adAnalysis.ExpandAllRDPLocalGroups(ctx, db); err != nil { + } else if localGroupData, err := adAnalysis.FetchLocalGroupData(ctx, db); err != nil { return &aggregateStats, err - } else if dcSyncStats, err := adAnalysis.PostDCSync(ctx, db, groupExpansions); err != nil { + } else if dcSyncStats, err := adAnalysis.PostDCSync(ctx, db, localGroupData); err != nil { return &aggregateStats, err } else if protectAdminGroupsStats, err := adAnalysis.PostProtectAdminGroups(ctx, db); err != nil { return &aggregateStats, err - } else if syncLAPSStats, err := adAnalysis.PostSyncLAPSPassword(ctx, db, groupExpansions); err != nil { + } else if syncLAPSStats, err := adAnalysis.PostSyncLAPSPassword(ctx, db, localGroupData); err != nil { return &aggregateStats, err } else if hasTrustKeyStats, err := adAnalysis.PostHasTrustKeys(ctx, db); err != nil { return &aggregateStats, err - } else if localGroupStats, err := adAnalysis.PostLocalGroups(ctx, db, groupExpansions, false, citrixEnabled); err != nil { + } else if localGroupStats, err := adAnalysis.PostLocalGroups(ctx, db, localGroupData); err != nil { + return &aggregateStats, err + } else if canRDPStats, err := adAnalysis.PostCanRDP(ctx, db, localGroupData, true, citrixEnabled); err != nil { return &aggregateStats, err - } else if adcsStats, adcsCache, err := adAnalysis.PostADCS(ctx, db, groupExpansions, adcsEnabled); err != nil { + } else if adcsStats, adcsCache, err := adAnalysis.PostADCS(ctx, db, localGroupData, adcsEnabled); err != nil { return &aggregateStats, err - } else if ownsStats, err := adAnalysis.PostOwnsAndWriteOwner(ctx, db, groupExpansions); err != nil { + } else if ownsStats, err := adAnalysis.PostOwnsAndWriteOwner(ctx, db, localGroupData); err != nil { return &aggregateStats, err - } else if ntlmStats, err := 
adAnalysis.PostNTLM(ctx, db, groupExpansions, adcsCache, ntlmEnabled, compositionCounter); err != nil { + } else if ntlmStats, err := adAnalysis.PostNTLM(ctx, db, localGroupData, adcsCache, ntlmEnabled, compositionCounter); err != nil { return &aggregateStats, err } else { - aggregateStats.Merge(stats) // DeleteTransitEdges - aggregateStats.Merge(dcSyncStats) // PostDCSync - aggregateStats.Merge(protectAdminGroupsStats) + aggregateStats.Merge(deleteTransitEdgesStats) aggregateStats.Merge(syncLAPSStats) aggregateStats.Merge(hasTrustKeyStats) + aggregateStats.Merge(dcSyncStats) + aggregateStats.Merge(protectAdminGroupsStats) aggregateStats.Merge(localGroupStats) + aggregateStats.Merge(canRDPStats) aggregateStats.Merge(adcsStats) aggregateStats.Merge(ownsStats) aggregateStats.Merge(ntlmStats) + return &aggregateStats, nil } } diff --git a/cmd/api/src/analysis/analysis_integration_test.go b/cmd/api/src/analysis/analysis_integration_test.go index d745274ba3b..ed8d0b38e46 100644 --- a/cmd/api/src/analysis/analysis_integration_test.go +++ b/cmd/api/src/analysis/analysis_integration_test.go @@ -33,17 +33,28 @@ import ( "github.com/stretchr/testify/require" ) +func FetchCanRDPData(ctx context.Context, graphDB graph.Database) (*adAnalysis.CanRDPData, error) { + if localGroupData, err := adAnalysis.FetchLocalGroupData(ctx, graphDB); err != nil { + return nil, err + } else { + return localGroupData.FetchCanRDPData(ctx, graphDB) + } +} + func TestFetchRDPEnsureNoDescent(t *testing.T) { testContext := integration.NewGraphTestContext(t, schema.DefaultGraphSchema()) testContext.DatabaseTestWithSetup(func(harness *integration.HarnessDetails) error { harness.RDPB.Setup(testContext) return nil }, func(harness integration.HarnessDetails, db graph.Database) { - groupExpansions, err := adAnalysis.ExpandAllRDPLocalGroups(context.Background(), db) + canRDPData, err := FetchCanRDPData(context.Background(), db) require.Nil(t, err) require.Nil(t, db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { - rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(tx, harness.RDPB.Computer.ID, groupExpansions, false, false) + rdpComputerData, err := canRDPData.FetchCanRDPComputerData(tx, harness.RDPB.Computer.ID) + require.Nil(t, err) + + rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(rdpComputerData, false, false) require.Nil(t, err) // We should expect all groups that have the RIL incoming privilege to the computer @@ -62,12 +73,14 @@ func TestFetchCanRDPEntityBitmapForComputer(t *testing.T) { harness.RDP.Setup(testContext) return nil }, func(harness integration.HarnessDetails, db graph.Database) { - groupExpansions, err := adAnalysis.ExpandAllRDPLocalGroups(context.Background(), db) + canRDPData, err := FetchCanRDPData(context.Background(), db) require.Nil(t, err) - // Enforced URA validation require.Nil(t, db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { - rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(tx, harness.RDP.Computer.ID, groupExpansions, true, false) + rdpComputerData, err := canRDPData.FetchCanRDPComputerData(tx, harness.RDP.Computer.ID) + require.Nil(t, err) + + rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(rdpComputerData, true, false) require.Nil(t, err) // We should expect all entities that have the RIL incoming privilege to the computer @@ -94,7 +107,10 @@ func TestFetchCanRDPEntityBitmapForComputer(t *testing.T) { // Unenforced URA 
validation. result set should only include first degree members of `Remote Desktop Users` group require.Nil(t, db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { - rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(tx, harness.RDP.Computer.ID, groupExpansions, false, false) + rdpComputerData, err := canRDPData.FetchCanRDPComputerData(tx, harness.RDP.Computer.ID) + require.Nil(t, err) + + rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(rdpComputerData, false, false) require.Nil(t, err) require.True(t, rdpEnabledEntityIDBitmap.Contains(harness.RDP.IrshadUser.ID.Uint64())) @@ -123,12 +139,15 @@ func TestFetchCanRDPEntityBitmapForComputer(t *testing.T) { })) // Recalculate group expansions - groupExpansions, err = adAnalysis.ExpandAllRDPLocalGroups(context.Background(), db) + canRDPData, err = FetchCanRDPData(context.Background(), db) require.Nil(t, err) // result set should only include first degree members of `Remote Desktop Users` group. test.RequireNilErr(t, db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { - rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(tx, harness.RDP.Computer.ID, groupExpansions, true, false) + rdpComputerData, err := canRDPData.FetchCanRDPComputerData(tx, harness.RDP.Computer.ID) + require.Nil(t, err) + + rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(rdpComputerData, false, false) require.Nil(t, err) require.Equal(t, 6, int(rdpEnabledEntityIDBitmap.Cardinality())) @@ -151,12 +170,15 @@ func TestFetchCanRDPEntityBitmapForComputerWithCitrix(t *testing.T) { harness.RDPHarnessWithCitrix.Setup(testContext) return nil }, func(harness integration.HarnessDetails, db graph.Database) { - groupExpansions, err := adAnalysis.ExpandAllRDPLocalGroups(context.Background(), db) + canRDPData, err := FetchCanRDPData(context.Background(), db) require.Nil(t, err) // the Remote Desktop Users group does not have an RIL(Remote Interactive Login) edge to the computer. 
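The RDP integration tests above now resolve CanRDP reachability in two explicit stages instead of handing a transaction plus an impact.PathAggregator to FetchCanRDPEntityBitmapForComputer. The following is a minimal sketch of that call sequence, assuming only the names and signatures visible in this diff (FetchLocalGroupData, FetchCanRDPData, FetchCanRDPComputerData, FetchCanRDPEntityBitmapForComputer); the helper name canRDPBitmapForComputer, the adAnalysis import alias, and the cardinality.Duplex[uint64] return type are illustrative assumptions, not taken from the implementation.

package analysis_test

import (
	"context"

	adAnalysis "github.com/specterops/bloodhound/packages/go/analysis/ad"
	"github.com/specterops/dawgs/cardinality"
	"github.com/specterops/dawgs/graph"
)

// canRDPBitmapForComputer sketches the new two-stage flow: build the shared local
// group data once, derive the CanRDP view from it, then resolve a single computer
// inside a read transaction. Signatures are assumed from the call sites in this diff.
func canRDPBitmapForComputer(ctx context.Context, db graph.Database, computerID graph.ID, enforceURA, citrixEnabled bool) (cardinality.Duplex[uint64], error) {
	// Stage 1: one graph-wide pass that replaces ExpandAllRDPLocalGroups.
	localGroupData, err := adAnalysis.FetchLocalGroupData(ctx, db)
	if err != nil {
		return nil, err
	}

	// Stage 2: derive the CanRDP-specific data from the shared local group data.
	canRDPData, err := localGroupData.FetchCanRDPData(ctx, db)
	if err != nil {
		return nil, err
	}

	var bitmap cardinality.Duplex[uint64]

	// Stage 3: the per-computer lookup still needs a transaction, but the final
	// bitmap calculation no longer does.
	err = db.ReadTransaction(ctx, func(tx graph.Transaction) error {
		if rdpComputerData, err := canRDPData.FetchCanRDPComputerData(tx, computerID); err != nil {
			return err
		} else if bitmap, err = adAnalysis.FetchCanRDPEntityBitmapForComputer(rdpComputerData, enforceURA, citrixEnabled); err != nil {
			return err
		} else {
			return nil
		}
	})

	return bitmap, err
}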
require.Nil(t, db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { - rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(tx, harness.RDPHarnessWithCitrix.Computer.ID, groupExpansions, true, true) + rdpComputerData, err := canRDPData.FetchCanRDPComputerData(tx, harness.RDPHarnessWithCitrix.Computer.ID) + require.Nil(t, err) + + rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(rdpComputerData, true, true) require.Nil(t, err) // We should expect the intersection of members of `Direct Access Users`, with entities that have the RIL privilege to the computer @@ -172,10 +194,13 @@ func TestFetchCanRDPEntityBitmapForComputerWithCitrix(t *testing.T) { // When citrix is enabled but URA is not enforced, we should expect the cross product of Remote Desktop Users and Direct Access Users require.Nil(t, db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { - rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(tx, harness.RDPHarnessWithCitrix.Computer.ID, groupExpansions, false, true) + rdpComputerData, err := canRDPData.FetchCanRDPComputerData(tx, harness.RDPHarnessWithCitrix.Computer.ID) require.Nil(t, err) - require.Equal(t, 5, int(rdpEnabledEntityIDBitmap.Cardinality())) + rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(rdpComputerData, false, true) + require.Nil(t, err) + + require.Equalf(t, 5, int(rdpEnabledEntityIDBitmap.Cardinality()), "members %v", rdpEnabledEntityIDBitmap.Slice()) require.True(t, rdpEnabledEntityIDBitmap.Contains(harness.RDPHarnessWithCitrix.IrshadUser.ID.Uint64())) require.True(t, rdpEnabledEntityIDBitmap.Contains(harness.RDPHarnessWithCitrix.UliUser.ID.Uint64())) @@ -194,11 +219,14 @@ func TestFetchCanRDPEntityBitmapForComputerWithCitrix(t *testing.T) { })) // Recalculate group expansions - groupExpansions, err = adAnalysis.ExpandAllRDPLocalGroups(context.Background(), db) + canRDPData, err = FetchCanRDPData(context.Background(), db) require.Nil(t, err) require.Nil(t, db.ReadTransaction(context.Background(), func(tx graph.Transaction) error { - rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(tx, harness.RDPHarnessWithCitrix.Computer.ID, groupExpansions, true, true) + rdpComputerData, err := canRDPData.FetchCanRDPComputerData(tx, harness.RDPHarnessWithCitrix.Computer.ID) + require.Nil(t, err) + + rdpEnabledEntityIDBitmap, err := adAnalysis.FetchCanRDPEntityBitmapForComputer(rdpComputerData, true, true) require.Nil(t, err) // We should expect the cross product of members of `Direct Access Users,` `Remote Desktop Users`, and entities with RIL privileges to diff --git a/cmd/api/src/analysis/membership_integration_test.go b/cmd/api/src/analysis/membership_integration_test.go index 4291e8bb5f7..d2da6a8090a 100644 --- a/cmd/api/src/analysis/membership_integration_test.go +++ b/cmd/api/src/analysis/membership_integration_test.go @@ -27,6 +27,7 @@ import ( analysis "github.com/specterops/bloodhound/packages/go/analysis/ad" schema "github.com/specterops/bloodhound/packages/go/graphschema" "github.com/specterops/bloodhound/packages/go/graphschema/ad" + "github.com/specterops/dawgs/algo" "github.com/specterops/dawgs/graph" "github.com/specterops/dawgs/query" "github.com/stretchr/testify/require" @@ -73,20 +74,23 @@ func TestAnalyzeExposure(t *testing.T) { }) } -func TestResolveAllGroupMemberships(t *testing.T) { +func TestResolveReachOfGroupMembershipComponents(t *testing.T) { testContext := 
integration.NewGraphTestContext(t, schema.DefaultGraphSchema()) testContext.DatabaseTestWithSetup(func(harness *integration.HarnessDetails) error { harness.RDP.Setup(testContext) return nil }, func(harness integration.HarnessDetails, db graph.Database) { - memberships, err := analysis.ResolveAllGroupMemberships(context.Background(), db) + memberships, err := algo.FetchReachabilityCache(context.Background(), db, query.KindIn(query.Relationship(), ad.MemberOf, ad.MemberOfLocalGroup)) test.RequireNilErr(t, err) - require.Equal(t, 3, int(memberships.Cardinality(harness.RDP.DomainGroupA.ID.Uint64()).Cardinality())) - require.Equal(t, 1, int(memberships.Cardinality(harness.RDP.DomainGroupB.ID.Uint64()).Cardinality())) - require.Equal(t, 1, int(memberships.Cardinality(harness.RDP.DomainGroupC.ID.Uint64()).Cardinality())) - require.Equal(t, 1, int(memberships.Cardinality(harness.RDP.DomainGroupD.ID.Uint64()).Cardinality())) - require.Equal(t, 2, int(memberships.Cardinality(harness.RDP.DomainGroupE.ID.Uint64()).Cardinality())) + // Because the algorithm uses a condensed (SCC) version of the directed graph, component membership + // will always include the origin member that reach was computed from. Typically, downstream users + // of this function will remove the ID from their merged bitmap after reachability is computed. + require.Equal(t, 4, int(memberships.ReachOfComponentContainingMember(harness.RDP.DomainGroupA.ID.Uint64(), graph.DirectionInbound).Cardinality())) + require.Equal(t, 2, int(memberships.ReachOfComponentContainingMember(harness.RDP.DomainGroupB.ID.Uint64(), graph.DirectionInbound).Cardinality())) + require.Equal(t, 2, int(memberships.ReachOfComponentContainingMember(harness.RDP.DomainGroupC.ID.Uint64(), graph.DirectionInbound).Cardinality())) + require.Equal(t, 2, int(memberships.ReachOfComponentContainingMember(harness.RDP.DomainGroupD.ID.Uint64(), graph.DirectionInbound).Cardinality())) + require.Equal(t, 3, int(memberships.ReachOfComponentContainingMember(harness.RDP.DomainGroupE.ID.Uint64(), graph.DirectionInbound).Cardinality())) }) } diff --git a/cmd/api/src/analysis/post_integration_test.go b/cmd/api/src/analysis/post_integration_test.go index bd5278ebf84..1ce6c9a3cae 100644 --- a/cmd/api/src/analysis/post_integration_test.go +++ b/cmd/api/src/analysis/post_integration_test.go @@ -81,12 +81,14 @@ func TestCrossProduct(t *testing.T) { testContext.DatabaseTransactionTestWithSetup(func(harness *integration.HarnessDetails) error { harness.ShortcutHarness.Setup(testContext) return nil - }, func(harness integration.HarnessDetails, db graph.Database, tx graph.Transaction) { + }, func(harness integration.HarnessDetails, graphDB graph.Database, tx graph.Transaction) { firstSet := []*graph.Node{testContext.Harness.ShortcutHarness.Group1} secondSet := []*graph.Node{testContext.Harness.ShortcutHarness.Group2} - groupExpansions, err := ad2.ExpandAllRDPLocalGroups(context.Background(), db) - require.Nil(t, err) - results := ad2.CalculateCrossProductNodeSets(tx, groupExpansions, firstSet, secondSet) + + excludedGroups, err := ad2.FetchLocalGroupData(context.Background(), graphDB) + require.NoError(t, err) + + results := ad2.CalculateCrossProductNodeSets(excludedGroups, firstSet, secondSet) require.Truef(t, results.Contains(harness.ShortcutHarness.Group3.ID.Uint64()), "missing id %d", harness.ShortcutHarness.Group3.ID.Uint64()) }) } @@ -96,12 +98,14 @@ func TestCrossProductAuthUsers(t *testing.T) { testContext.DatabaseTransactionTestWithSetup(func(harness *integration.HarnessDetails) error 
{ harness.ShortcutHarnessAuthUsers.Setup(testContext) return nil - }, func(harness integration.HarnessDetails, db graph.Database, tx graph.Transaction) { + }, func(harness integration.HarnessDetails, graphDB graph.Database, tx graph.Transaction) { firstSet := []*graph.Node{testContext.Harness.ShortcutHarnessAuthUsers.Group1} secondSet := []*graph.Node{testContext.Harness.ShortcutHarnessAuthUsers.Group2} - groupExpansions, err := ad2.ExpandAllRDPLocalGroups(context.Background(), db) - require.Nil(t, err) - results := ad2.CalculateCrossProductNodeSets(tx, groupExpansions, firstSet, secondSet) + + excludedGroups, err := ad2.FetchLocalGroupData(context.Background(), graphDB) + require.NoError(t, err) + + results := ad2.CalculateCrossProductNodeSets(excludedGroups, firstSet, secondSet) require.True(t, results.Contains(harness.ShortcutHarnessAuthUsers.Group2.ID.Uint64())) }) } @@ -111,12 +115,14 @@ func TestCrossProductEveryone(t *testing.T) { testContext.DatabaseTransactionTestWithSetup(func(harness *integration.HarnessDetails) error { harness.ShortcutHarnessEveryone.Setup(testContext) return nil - }, func(harness integration.HarnessDetails, db graph.Database, tx graph.Transaction) { + }, func(harness integration.HarnessDetails, graphDB graph.Database, tx graph.Transaction) { firstSet := []*graph.Node{testContext.Harness.ShortcutHarnessEveryone.Group1} secondSet := []*graph.Node{testContext.Harness.ShortcutHarnessEveryone.Group2} - groupExpansions, err := ad2.ExpandAllRDPLocalGroups(context.Background(), db) - require.Nil(t, err) - results := ad2.CalculateCrossProductNodeSets(tx, groupExpansions, firstSet, secondSet) + + excludedGroups, err := ad2.FetchLocalGroupData(context.Background(), graphDB) + require.NoError(t, err) + + results := ad2.CalculateCrossProductNodeSets(excludedGroups, firstSet, secondSet) require.True(t, results.Contains(harness.ShortcutHarnessEveryone.Group2.ID.Uint64())) }) } @@ -126,12 +132,14 @@ func TestCrossProductEveryone2(t *testing.T) { testContext.DatabaseTransactionTestWithSetup(func(harness *integration.HarnessDetails) error { harness.ShortcutHarnessEveryone2.Setup(testContext) return nil - }, func(harness integration.HarnessDetails, db graph.Database, tx graph.Transaction) { + }, func(harness integration.HarnessDetails, graphDB graph.Database, tx graph.Transaction) { firstSet := []*graph.Node{testContext.Harness.ShortcutHarnessEveryone2.Group1} secondSet := []*graph.Node{testContext.Harness.ShortcutHarnessEveryone2.Group2} - groupExpansions, err := ad2.ExpandAllRDPLocalGroups(context.Background(), db) - require.Nil(t, err) - results := ad2.CalculateCrossProductNodeSets(tx, groupExpansions, firstSet, secondSet) + + excludedGroups, err := ad2.FetchLocalGroupData(context.Background(), graphDB) + require.NoError(t, err) + + results := ad2.CalculateCrossProductNodeSets(excludedGroups, firstSet, secondSet) require.True(t, results.Contains(harness.ShortcutHarnessEveryone2.Group1.ID.Uint64())) require.True(t, results.Contains(harness.ShortcutHarnessEveryone2.Group2.ID.Uint64())) }) diff --git a/cmd/api/src/daemons/datapipe/datapipe.go b/cmd/api/src/daemons/datapipe/datapipe.go index 19f35b49966..ade65399034 100644 --- a/cmd/api/src/daemons/datapipe/datapipe.go +++ b/cmd/api/src/daemons/datapipe/datapipe.go @@ -83,7 +83,6 @@ func (s *Daemon) Start(ctx context.Context) { for { select { case <-pruningTicker.C: - s.WithDatapipeStatus(ctx, model.DatapipeStatusPruning, s.pipeline.PruneData) case <-datapipeLoopTimer.C: diff --git a/go.mod b/go.mod index d598badcc89..47502d5b6f5 
100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ module github.com/specterops/bloodhound go 1.24.11 require ( - cuelang.org/go v0.15.1 + cuelang.org/go v0.15.3 github.com/Masterminds/semver/v3 v3.3.1 github.com/RoaringBitmap/roaring/v2 v2.14.4 github.com/bloodhoundad/azurehound/v2 v2.6.0 @@ -36,7 +36,7 @@ require ( github.com/gorilla/mux v1.8.1 github.com/gorilla/schema v1.4.1 github.com/hashicorp/golang-lru v1.0.2 - github.com/jackc/pgx/v5 v5.7.6 + github.com/jackc/pgx/v5 v5.8.0 github.com/jedib0t/go-pretty/v6 v6.6.7 github.com/lib/pq v1.10.9 github.com/neo4j/neo4j-go-driver/v5 v5.28.4 @@ -47,17 +47,17 @@ require ( github.com/russellhaering/goxmldsig v1.5.0 github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 github.com/shirou/gopsutil/v3 v3.24.5 - github.com/specterops/dawgs v0.3.2 + github.com/specterops/dawgs v0.4.2 github.com/stretchr/testify v1.11.1 github.com/teambition/rrule-go v1.8.2 github.com/ulule/limiter/v3 v3.11.2 github.com/unrolled/secure v1.17.0 go.uber.org/mock v0.5.2 - golang.org/x/crypto v0.46.0 - golang.org/x/mod v0.31.0 + golang.org/x/crypto v0.47.0 + golang.org/x/mod v0.32.0 golang.org/x/oauth2 v0.32.0 - golang.org/x/text v0.32.0 - golang.org/x/tools v0.40.0 + golang.org/x/text v0.33.0 + golang.org/x/tools v0.41.0 gorm.io/driver/postgres v1.5.10 gorm.io/gorm v1.25.12 ) @@ -84,7 +84,7 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.2.0 // indirect - github.com/axiomhq/hyperloglog v0.2.5 // indirect + github.com/axiomhq/hyperloglog v0.2.6 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beevik/etree v1.5.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -273,12 +273,12 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 // indirect + golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect - golang.org/x/net v0.48.0 // indirect + golang.org/x/net v0.49.0 // indirect golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index e81015ee900..cc4ca2d5f21 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ 4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084 h1:4k1yAtPvZJZQTu8DRY8muBo0LHv6TqtrE0AO5n6IPYs= cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084/go.mod h1:4WWeZNxUO1vRoZWAHIG0KZOd6dA25ypyWuwD3ti0Tdc= -cuelang.org/go v0.15.1 h1:MRnjc/KJE+K42rnJ3a+425f1jqXeOOgq9SK4tYRTtWw= -cuelang.org/go v0.15.1/go.mod h1:NYw6n4akZcTjA7QQwJ1/gqWrrhsN4aZwhcAL0jv9rZE= +cuelang.org/go v0.15.3 h1:JKR/lZVwuIGlLTGIaJ0jONz9+CK3UDx06sQ6DDxNkaE= +cuelang.org/go v0.15.3/go.mod h1:NYw6n4akZcTjA7QQwJ1/gqWrrhsN4aZwhcAL0jv9rZE= github.com/4meepo/tagalign v1.4.2 h1:0hcLHPGMjDyM1gHG58cS73aQF8J4TdVR96TZViorO9E= github.com/4meepo/tagalign v1.4.2/go.mod h1:+p4aMyFM+ra7nb41CnFG6aSDXqRxU/w1VQqScKqDARI= github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= @@ -52,8 +52,8 @@ 
github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8ger github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= -github.com/axiomhq/hyperloglog v0.2.5 h1:Hefy3i8nAs8zAI/tDp+wE7N+Ltr8JnwiW3875pvl0N8= -github.com/axiomhq/hyperloglog v0.2.5/go.mod h1:DLUK9yIzpU5B6YFLjxTIcbHu1g4Y1WQb1m5RH3radaM= +github.com/axiomhq/hyperloglog v0.2.6 h1:sRhvvF3RIXWQgAXaTphLp4yJiX4S0IN3MWTaAgZoRJw= +github.com/axiomhq/hyperloglog v0.2.6/go.mod h1:YjX/dQqCR/7QYX0g8mu8UZAjpIenz1FKM71UEsjFoTo= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/beevik/etree v1.5.0 h1:iaQZFSDS+3kYZiGoc9uKeOkUY3nYMXOKLl6KIJxiJWs= @@ -320,8 +320,8 @@ github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQ github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU= github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= -github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= -github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= +github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= @@ -552,8 +552,8 @@ github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9yS github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/specterops/dawgs v0.3.2 h1:xXYQdDcCenP0zmlbvDpWpEyaZHISqZLqXQnDVBJDRBA= -github.com/specterops/dawgs v0.3.2/go.mod h1:Ic7/TUXbLiSLLK954tncvAHkPpnOrI6FPx5asusavwQ= +github.com/specterops/dawgs v0.4.2 h1:+rZXp5yO+ynN2unFCTBOgN5KkCvlT65tlgD7ELWSZtA= +github.com/specterops/dawgs v0.4.2/go.mod h1:K4FBsW20t7P2jPBn4h04aSr/MZ2M3bivehexfEBB8Cs= github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= @@ -693,10 +693,10 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= -golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 
h1:MDfG8Cvcqlt9XXrmEiD4epKn7VJHZO84hejP9Jmp0MM= -golang.org/x/exp v0.0.0-20251209150349-8475f28825e9/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4= @@ -715,8 +715,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= -golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -735,8 +735,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -783,10 +783,10 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= -golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= 
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo= +golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -809,8 +809,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -836,8 +836,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= -golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= diff --git a/packages/go/analysis/ad/ad.go b/packages/go/analysis/ad/ad.go index e90f57109ca..4d10a4cc6e7 100644 --- a/packages/go/analysis/ad/ad.go +++ b/packages/go/analysis/ad/ad.go @@ -26,7 +26,6 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis/ad/internal/nodeprops" "github.com/specterops/bloodhound/packages/go/analysis/ad/wellknown" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/graphschema/common" @@ -475,7 +474,7 @@ func createOrUpdateWellKnownLink( // CalculateCrossProductNodeSets finds the intersection of the given sets of nodes. 
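// Note: the rewritten signature below drops the graph.Transaction and impact.PathAggregator
// parameters in favor of the precomputed *LocalGroupData; a usage sketch of that type,
// inferred from the call sites in this diff, appears at the end of this section.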
// See CalculateCrossProductNodeSetsDoc.md for explaination of the specialGroups (Authenticated Users and Everyone) and why we treat them the way we do -func CalculateCrossProductNodeSets(tx graph.Transaction, groupExpansions impact.PathAggregator, nodeSlices ...[]*graph.Node) cardinality.Duplex[uint64] { +func CalculateCrossProductNodeSets(localGroupData *LocalGroupData, nodeSlices ...[]*graph.Node) cardinality.Duplex[uint64] { if len(nodeSlices) < 2 { slog.Error("Cross products require at least 2 nodesets") return cardinality.NewBitmap64() @@ -497,42 +496,48 @@ func CalculateCrossProductNodeSets(tx graph.Transaction, groupExpansions impact. resultEntities = cardinality.NewBitmap64() ) - // Get the IDs of the Auth. Users and Everyone groups - specialGroups, err := FetchAuthUsersAndEveryoneGroups(tx) - if err != nil { - slog.Error(fmt.Sprintf("Could not fetch groups: %s", err.Error())) - } - // Unroll all nodesets for _, nodeSlice := range nodeSlices { var ( - firstDegreeSet = cardinality.NewBitmap64() - unrolledSet = cardinality.NewBitmap64() + // Skip sets containing Auth. Users or Everyone + nodeExcluded = false + + firstDegreeSet = cardinality.NewBitmap64() + entityReachBitmap = cardinality.NewBitmap64() ) for _, entity := range nodeSlice { entityID := entity.ID.Uint64() firstDegreeSet.Add(entityID) - unrolledSet.Add(entityID) + entityReachBitmap.Add(entityID) if entity.Kinds.ContainsOneOf(ad.Group, ad.LocalGroup) { - unrolledSet.Or(groupExpansions.Cardinality(entity.ID.Uint64())) - } - } + if localGroupData.ExcludedShortcutGroups.Contains(entityID) { + nodeExcluded = true + } else { + entityReach := localGroupData.GroupMembershipCache.ReachOfComponentContainingMember(entityID, graph.DirectionInbound) + entityReachBitmap.Or(entityReach) + + if entityReach.Cardinality() > 0 { + localGroupData.ExcludedShortcutGroups.Each(func(excludedNode uint64) bool { + if entityReach.Contains(excludedNode) { + nodeExcluded = true + } - // Skip sets containing Auth. Users or Everyone - hasSpecialGroup := false + return !nodeExcluded + }) + } + } + } - for _, specialGroup := range specialGroups { - if unrolledSet.Contains(specialGroup.ID.Uint64()) { - hasSpecialGroup = true + if nodeExcluded { break } } - if !hasSpecialGroup { - unrolledSets = append(unrolledSets, unrolledSet) + if !nodeExcluded { + unrolledSets = append(unrolledSets, entityReachBitmap) firstDegreeSets = append(firstDegreeSets, firstDegreeSet) } } @@ -546,15 +551,16 @@ func CalculateCrossProductNodeSets(tx graph.Transaction, groupExpansions impact. } return resultEntities - } else if len(firstDegreeSets) == 1 { // If every nodeset (unrolled) except one includes Auth. Users/Everyone then return that one nodeset (first degree) + } else if len(firstDegreeSets) == 1 { + // If every nodeset (unrolled) except one includes Auth. Users/Everyone then return that one nodeset (first degree) return firstDegreeSets[0] - } else { - // This means that len(firstDegreeSets) must be greater than or equal to 2 i.e. we have at least two nodesets (unrolled) without Auth. Users/Everyone - checkSet.Or(unrolledSets[1]) + } - for _, unrolledSet := range unrolledSets[2:] { - checkSet.And(unrolledSet) - } + // This means that len(firstDegreeSets) must be greater than or equal to 2 i.e. we have at least two nodesets (unrolled) without Auth. 
Users/Everyone + checkSet.Or(unrolledSets[1]) + + for _, unrolledSet := range unrolledSets[2:] { + checkSet.And(unrolledSet) } // Check first degree principals in our reference set (firstDegreeSets[0]) first @@ -562,7 +568,7 @@ func CalculateCrossProductNodeSets(tx graph.Transaction, groupExpansions impact. if checkSet.Contains(id) { resultEntities.Add(id) } else { - unrolledRefSet.Or(groupExpansions.Cardinality(id)) + localGroupData.GroupMembershipCache.OrReach(id, graph.DirectionInbound, unrolledRefSet) } return true @@ -570,10 +576,19 @@ func CalculateCrossProductNodeSets(tx graph.Transaction, groupExpansions impact. // Find all the groups in our secondary targets and map them to their cardinality in our expansions // Saving off to a map to prevent multiple lookups on the expansions - tempMap := map[uint64]uint64{} + var ( + tempMap = map[uint64]uint64{} + tempBitmap = cardinality.NewBitmap64() + ) + unrolledRefSet.Each(func(id uint64) bool { // If group expansions contains this ID and its cardinality is > 0, it's a group/localgroup - idCardinality := groupExpansions.Cardinality(id).Cardinality() + localGroupData.GroupMembershipCache.OrReach(id, graph.DirectionInbound, tempBitmap) + idCardinality := tempBitmap.Cardinality() + + // Clear the bitmap eagerly + tempBitmap.Clear() + if idCardinality > 0 { tempMap[id] = idCardinality } @@ -604,7 +619,7 @@ func CalculateCrossProductNodeSets(tx graph.Transaction, groupExpansions impact. resultEntities.Add(groupId) unrolledRefSet.Remove(groupId) - unrolledRefSet.Xor(groupExpansions.Cardinality(groupId)) + localGroupData.GroupMembershipCache.XorReach(groupId, graph.DirectionInbound, unrolledRefSet) } else { // If this isn't a match, remove it from the second set to ensure we don't check it again, but leave its membership unrolledRefSet.Remove(groupId) diff --git a/packages/go/analysis/ad/adcs.go b/packages/go/analysis/ad/adcs.go index e5883a2fbb0..aa780260cc0 100644 --- a/packages/go/analysis/ad/adcs.go +++ b/packages/go/analysis/ad/adcs.go @@ -23,7 +23,6 @@ import ( "log/slog" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/graph" ) @@ -34,7 +33,7 @@ var ( EkuCertRequestAgent = "1.3.6.1.4.1.311.20.2.1" ) -func PostADCS(ctx context.Context, db graph.Database, groupExpansions impact.PathAggregator, adcsEnabled bool) (*analysis.AtomicPostProcessingStats, ADCSCache, error) { +func PostADCS(ctx context.Context, db graph.Database, localGroupData *LocalGroupData, adcsEnabled bool) (*analysis.AtomicPostProcessingStats, ADCSCache, error) { var cache = NewADCSCache() if enterpriseCertAuthorities, err := FetchNodesByKind(ctx, db, ad.EnterpriseCA); err != nil { return &analysis.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching enterpriseCA nodes: %w", err) @@ -67,7 +66,7 @@ func PostADCS(ctx context.Context, db graph.Database, groupExpansions impact.Pat targetDomains.Add(innerDomain) } } - processEnterpriseCAWithValidCertChainToDomain(innerEnterpriseCA, targetDomains, groupExpansions, cache, operation) + processEnterpriseCAWithValidCertChainToDomain(innerEnterpriseCA, targetDomains, localGroupData, cache, operation) } return &operation.Stats, cache, operation.Done() } @@ -107,7 +106,7 @@ func postADCSPreProcessStep2(ctx context.Context, db graph.Database, cache ADCSC } } -func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, targetDomains *graph.NodeSet, 
groupExpansions impact.PathAggregator, cache ADCSCache, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) { +func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, targetDomains *graph.NodeSet, localGroupData *LocalGroupData, cache ADCSCache, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) { operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { if err := PostGoldenCert(ctx, tx, outC, enterpriseCA, targetDomains); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.GoldenCert.String(), err)) @@ -118,7 +117,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC1(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC1(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC1.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC1.String(), err)) @@ -127,7 +126,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC3(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC3(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC3.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC3.String(), err)) @@ -136,7 +135,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC4(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC4(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC4.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC4.String(), err)) @@ -145,7 +144,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC6a(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC6a(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6a.String(), err)) } else if err != nil { 
slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6a.String(), err)) @@ -154,7 +153,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC6b(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC6b(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6b.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC6b.String(), err)) @@ -163,7 +162,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC9a(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC9a(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9a.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9a.String(), err)) @@ -172,7 +171,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC9b(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC9b(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9b.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC9b.String(), err)) @@ -181,7 +180,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC10a(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC10a(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10a.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10a.String(), err)) @@ -190,7 +189,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC10b(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC10b(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, 
graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10b.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC10b.String(), err)) @@ -199,7 +198,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar }) operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if err := PostADCSESC13(ctx, tx, outC, groupExpansions, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { + if err := PostADCSESC13(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC13.String(), err)) } else if err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Failed post processing for %s: %v", ad.ADCSESC13.String(), err)) diff --git a/packages/go/analysis/ad/adcscache.go b/packages/go/analysis/ad/adcscache.go index ba86dcf0694..2adef3778f9 100644 --- a/packages/go/analysis/ad/adcscache.go +++ b/packages/go/analysis/ad/adcscache.go @@ -22,6 +22,7 @@ import ( "log/slog" "sync" + "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/ein" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/graphschema/common" @@ -72,6 +73,8 @@ func NewADCSCache() ADCSCache { } func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpriseCertAuthorities, certTemplates []*graph.Node) error { + defer measure.ContextMeasure(ctx, slog.LevelInfo, "ADCSCache.BuildCache")() + s.mu.Lock() defer s.mu.Unlock() @@ -191,7 +194,6 @@ func (s *ADCSCache) BuildCache(ctx context.Context, db graph.Database, enterpris slog.ErrorContext(ctx, fmt.Sprintf("Error building adcs cache %v", err)) } - slog.InfoContext(ctx, "Finished building adcs cache") return err } diff --git a/packages/go/analysis/ad/esc1.go b/packages/go/analysis/ad/esc1.go index 7739c0cf10b..f1df6a23c82 100644 --- a/packages/go/analysis/ad/esc1.go +++ b/packages/go/analysis/ad/esc1.go @@ -23,7 +23,6 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -33,7 +32,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, expandedGroups impact.PathAggregator, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { results := cardinality.NewBitmap64() if publishedCertTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID); len(publishedCertTemplates) == 0 { return nil @@ -46,7 +45,7 @@ func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC chan<- analysi } else if !valid { continue } else { - results.Or(CalculateCrossProductNodeSets(tx, expandedGroups, cache.GetCertTemplateEnrollers(certTemplate.ID), ecaEnrollers)) + results.Or(CalculateCrossProductNodeSets(localGroupData, 
cache.GetCertTemplateEnrollers(certTemplate.ID), ecaEnrollers)) } } } diff --git a/packages/go/analysis/ad/esc10.go b/packages/go/analysis/ad/esc10.go index 27031f1ffd6..858d4d94a5f 100644 --- a/packages/go/analysis/ad/esc10.go +++ b/packages/go/analysis/ad/esc10.go @@ -23,7 +23,6 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/ein" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" @@ -34,7 +33,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { return nil } else if ecaEnrollers := cache.GetEnterpriseCAEnrollers(eca.ID); len(ecaEnrollers) == 0 { @@ -52,7 +51,7 @@ func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analy slog.DebugContext(ctx, fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { - victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) + victimBitmap := getVictimBitmap(localGroupData, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if filteredVictims, err := filterUserDNSResults(tx, victimBitmap, template); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Error filtering users from victims for esc9a: %v", err)) @@ -82,7 +81,7 @@ func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analy return nil } -func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if publishedCertTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID); len(publishedCertTemplates) == 0 { return nil } else if ecaEnrollers := cache.GetEnterpriseCAEnrollers(enterpriseCA.ID); len(ecaEnrollers) == 0 { @@ -100,7 +99,7 @@ func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analy slog.DebugContext(ctx, fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { - victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(enterpriseCA.ID)) + victimBitmap := getVictimBitmap(localGroupData, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(enterpriseCA.ID)) if attackers, err := 
FetchAttackersForEscalations9and10(tx, victimBitmap, true); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Error getting start nodes for esc10b attacker nodes: %v", err)) diff --git a/packages/go/analysis/ad/esc13.go b/packages/go/analysis/ad/esc13.go index 0ef7c8dbafc..3d70f510dbb 100644 --- a/packages/go/analysis/ad/esc13.go +++ b/packages/go/analysis/ad/esc13.go @@ -24,7 +24,6 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -34,7 +33,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { return nil } else { @@ -51,7 +50,7 @@ func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analys } else if len(groupNodes) == 0 { continue } else { - controlBitmap := CalculateCrossProductNodeSets(tx, groupExpansions, ecaEnrollers, cache.GetCertTemplateEnrollers(template.ID)) + controlBitmap := CalculateCrossProductNodeSets(localGroupData, ecaEnrollers, cache.GetCertTemplateEnrollers(template.ID)) if filtered, err := filterUserDNSResults(tx, controlBitmap, template); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Error filtering users from victims for esc13: %v", err)) continue diff --git a/packages/go/analysis/ad/esc3.go b/packages/go/analysis/ad/esc3.go index 92664f29243..182671e75a7 100644 --- a/packages/go/analysis/ad/esc3.go +++ b/packages/go/analysis/ad/esc3.go @@ -25,7 +25,6 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" @@ -36,7 +35,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca2 *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca2 *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { results := cardinality.NewBitmap64() if publishedCertTemplates := cache.GetPublishedTemplateCache(eca2.ID); len(publishedCertTemplates) == 0 { return nil @@ -89,8 +88,8 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi slog.ErrorContext(ctx, fmt.Sprintf("Error getting delegated agents for cert template %d: %v", certTemplateTwo.ID, err)) } else { for _, eca1 := range publishedECAs { - tempResults := CalculateCrossProductNodeSets(tx, - groupExpansions, + tempResults := CalculateCrossProductNodeSets( + localGroupData, certTemplateEnrollersOne, certTemplateEnrollersTwo, cache.GetEnterpriseCAEnrollers(eca1.ID), 
@@ -107,8 +106,8 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi } } else { for _, eca1 := range publishedECAs { - tempResults := CalculateCrossProductNodeSets(tx, - groupExpansions, + tempResults := CalculateCrossProductNodeSets( + localGroupData, certTemplateEnrollersOne, certTemplateEnrollersTwo, cache.GetEnterpriseCAEnrollers(eca1.ID), diff --git a/packages/go/analysis/ad/esc4.go b/packages/go/analysis/ad/esc4.go index 2a821cac862..2ddd03b9b54 100644 --- a/packages/go/analysis/ad/esc4.go +++ b/packages/go/analysis/ad/esc4.go @@ -23,7 +23,6 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -33,7 +32,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { // 1. principals := cardinality.NewBitmap64() publishedTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID) @@ -61,16 +60,16 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi // 2a. principals that control the cert template principals.Or( - CalculateCrossProductNodeSets(tx, - groupExpansions, + CalculateCrossProductNodeSets( + localGroupData, enterpriseCAEnrollers, certTemplateControllers, )) // 2b. principals with `Enroll/AllExtendedRights` + `Generic Write` combination on the cert template principals.Or( - CalculateCrossProductNodeSets(tx, - groupExpansions, + CalculateCrossProductNodeSets( + localGroupData, enterpriseCAEnrollers, principalsWithGenericWrite.Slice(), principalsWithEnrollOrAllExtendedRights.Slice(), @@ -86,8 +85,8 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi } // 2d. principals with `Enroll/AllExtendedRights` + `WritePKINameFlag` + `WritePKIEnrollmentFlag` on the cert template - principals.Or(CalculateCrossProductNodeSets(tx, - groupExpansions, + principals.Or(CalculateCrossProductNodeSets( + localGroupData, enterpriseCAEnrollers, principalsWithEnrollOrAllExtendedRights.Slice(), principalsWithPKINameFlag.Slice(), @@ -97,8 +96,8 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi // 2e. if enrolleeSuppliesSubject { principals.Or( - CalculateCrossProductNodeSets(tx, - groupExpansions, + CalculateCrossProductNodeSets( + localGroupData, enterpriseCAEnrollers, principalsWithEnrollOrAllExtendedRights.Slice(), principalsWithPKIEnrollmentFlag.Slice(), @@ -109,8 +108,8 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi // 2f. 
if !requiresManagerApproval { principals.Or( - CalculateCrossProductNodeSets(tx, - groupExpansions, + CalculateCrossProductNodeSets( + localGroupData, enterpriseCAEnrollers, principalsWithEnrollOrAllExtendedRights.Slice(), principalsWithPKINameFlag.Slice(), diff --git a/packages/go/analysis/ad/esc6.go b/packages/go/analysis/ad/esc6.go index 67acd427bc5..9198bec5f3e 100644 --- a/packages/go/analysis/ad/esc6.go +++ b/packages/go/analysis/ad/esc6.go @@ -25,7 +25,6 @@ import ( "github.com/specterops/bloodhound/packages/go/ein" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -35,7 +34,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if isUserSpecifiesSanEnabledCollected, err := enterpriseCA.Properties.Get(ad.IsUserSpecifiesSanEnabledCollected.String()).Bool(); err != nil { return err } else if !isUserSpecifiesSanEnabledCollected { @@ -57,7 +56,7 @@ func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analys } else if !valid { continue } else { - enrollers := CalculateCrossProductNodeSets(tx, groupExpansions, cache.GetCertTemplateEnrollers(publishedCertTemplate.ID), enterpriseCAEnrollers) + enrollers := CalculateCrossProductNodeSets(localGroupData, cache.GetCertTemplateEnrollers(publishedCertTemplate.ID), enterpriseCAEnrollers) if filteredEnrollers, err := filterUserDNSResults(tx, enrollers, publishedCertTemplate); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Error filtering users in ESC6a: %v", err)) @@ -80,7 +79,7 @@ func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analys return nil } -func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if isUserSpecifiesSanEnabledCollected, err := enterpriseCA.Properties.Get(ad.IsUserSpecifiesSanEnabledCollected.String()).Bool(); err != nil { return err } else if !isUserSpecifiesSanEnabledCollected { @@ -102,7 +101,7 @@ func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analys } else if !valid { continue } else { - enrollers := CalculateCrossProductNodeSets(tx, groupExpansions, cache.GetCertTemplateEnrollers(publishedCertTemplate.ID), enterpriseCAEnrollers) + enrollers := CalculateCrossProductNodeSets(localGroupData, cache.GetCertTemplateEnrollers(publishedCertTemplate.ID), enterpriseCAEnrollers) if filteredEnrollers, err := filterUserDNSResults(tx, enrollers, publishedCertTemplate); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Error filtering users in ESC6b: %v", err)) diff --git a/packages/go/analysis/ad/esc9.go 
b/packages/go/analysis/ad/esc9.go index 7d6083931f7..91f2bdf90ff 100644 --- a/packages/go/analysis/ad/esc9.go +++ b/packages/go/analysis/ad/esc9.go @@ -23,7 +23,6 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -33,7 +32,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { results := cardinality.NewBitmap64() if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { @@ -51,7 +50,7 @@ func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analys slog.DebugContext(ctx, fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { - victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) + victimBitmap := getVictimBitmap(localGroupData, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if filteredVictims, err := filterUserDNSResults(tx, victimBitmap, template); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Error filtering users from victims for esc9a: %v", err)) @@ -82,7 +81,7 @@ func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analys } } -func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, groupExpansions impact.PathAggregator, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { results := cardinality.NewBitmap64() if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { @@ -100,7 +99,7 @@ func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analys slog.DebugContext(ctx, fmt.Sprintf("Failed to retrieve enrollers for cert template %d from cache", template.ID)) continue } else { - victimBitmap := getVictimBitmap(groupExpansions, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) + victimBitmap := getVictimBitmap(localGroupData, certTemplateEnrollers, ecaEnrollers, cache.GetCertTemplateHasSpecialEnrollers(template.ID), cache.GetEnterpriseCAHasSpecialEnrollers(eca.ID)) if attackers, err := FetchAttackersForEscalations9and10(tx, victimBitmap, true); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Error getting start nodes for esc9a attacker nodes: %v", err)) diff --git a/packages/go/analysis/ad/esc_shared.go b/packages/go/analysis/ad/esc_shared.go index a6df1a5574b..7df941965db 100644 --- a/packages/go/analysis/ad/esc_shared.go 
+++ b/packages/go/analysis/ad/esc_shared.go @@ -25,7 +25,6 @@ import ( "strings" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/slicesext" "github.com/specterops/dawgs/cardinality" @@ -300,24 +299,26 @@ func findNodesByCertThumbprint(certThumbprint string, tx graph.Transaction, kind })) } -func expandNodeSliceToBitmapWithoutGroups(nodes []*graph.Node, groupExpansions impact.PathAggregator) cardinality.Duplex[uint64] { - var bitmap = cardinality.NewBitmap64() +func expandNodeSliceToBitmapWithoutGroups(nodes []*graph.Node, localGroupData *LocalGroupData) cardinality.Duplex[uint64] { + var ( + nonGroupNodes = cardinality.NewBitmap64() + ) for _, controller := range nodes { if controller.Kinds.ContainsOneOf(ad.Group) { - groupExpansions.Cardinality(controller.ID.Uint64()).(cardinality.Duplex[uint64]).Each(func(id uint64) bool { - //Check group expansions against each id, if cardinality is 0 than its not a group - if groupExpansions.Cardinality(id).Cardinality() == 0 { - bitmap.Add(id) + localGroupData.GroupMembershipCache.ReachOfComponentContainingMember(controller.ID.Uint64(), graph.DirectionInbound).Each(func(memberID uint64) bool { + if !localGroupData.Groups.Contains(memberID) { + nonGroupNodes.Add(memberID) } + return true }) } else { - bitmap.Add(controller.ID.Uint64()) + nonGroupNodes.Add(controller.ID.Uint64()) } } - return bitmap + return nonGroupNodes } func containsAuthUsersOrEveryone(tx graph.Transaction, nodes []*graph.Node) (bool, error) { @@ -378,11 +379,11 @@ func filterUserDNSResults(tx graph.Transaction, bitmap cardinality.Duplex[uint64 return bitmap, nil } -func getVictimBitmap(groupExpansions impact.PathAggregator, certTemplateControllers, ecaControllers []*graph.Node, specialGroupHasTemplateEnroll, specialGroupHasECAEnroll bool) cardinality.Duplex[uint64] { +func getVictimBitmap(localGroupData *LocalGroupData, certTemplateControllers, ecaControllers []*graph.Node, specialGroupHasTemplateEnroll, specialGroupHasECAEnroll bool) cardinality.Duplex[uint64] { // Expand controllers for the eca + template completely because we don't do group shortcutting here var ( - templateBitmap = expandNodeSliceToBitmapWithoutGroups(certTemplateControllers, groupExpansions) - ecaBitmap = expandNodeSliceToBitmapWithoutGroups(ecaControllers, groupExpansions) + templateBitmap = expandNodeSliceToBitmapWithoutGroups(certTemplateControllers, localGroupData) + ecaBitmap = expandNodeSliceToBitmapWithoutGroups(ecaControllers, localGroupData) victimBitmap = cardinality.NewBitmap64() ) diff --git a/packages/go/analysis/ad/local_groups.go b/packages/go/analysis/ad/local_groups.go new file mode 100644 index 00000000000..ce6bbcf2ca2 --- /dev/null +++ b/packages/go/analysis/ad/local_groups.go @@ -0,0 +1,363 @@ +// Copyright 2026 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 +package ad + +import ( + "context" + "log/slog" + "runtime" + "sync" + "sync/atomic" + + "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/bhlog/attr" + "github.com/specterops/bloodhound/packages/go/bhlog/measure" + "github.com/specterops/bloodhound/packages/go/graphschema/ad" + "github.com/specterops/dawgs/graph" + "github.com/specterops/dawgs/util" + "github.com/specterops/dawgs/util/channels" +) + +func PostCanRDP(parentCtx context.Context, graphDB graph.Database, localGroupData *LocalGroupData, enforceURA bool, citrixEnabled bool) (*analysis.AtomicPostProcessingStats, error) { + var ( + ctx, done = context.WithCancel(parentCtx) + stats = analysis.NewAtomicPostProcessingStats() + numComputersProcessed = &atomic.Uint64{} + workC = make(chan uint64) + workerWG sync.WaitGroup + computerC = make(chan *CanRDPComputerData) + computerWG sync.WaitGroup + postC = make(chan analysis.CreatePostRelationshipJob, 4096) + postWG sync.WaitGroup + + // Requirement for any CanRDP processing + canRDPData, err = localGroupData.FetchCanRDPData(ctx, graphDB) + ) + + // Ensure the internal operation context is closed out + defer done() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "PostCanRDP")() + + // If we didn't get the canRDPData then we can't run post + if err != nil { + return nil, err + } + + postWG.Add(1) + + go func() { + defer postWG.Done() + + relProperties := analysis.NewPropertiesWithLastSeen() + + if err := graphDB.BatchOperation(ctx, func(batch graph.Batch) error { + for { + nextPost, shouldContinue := channels.Receive(ctx, postC) + + if !shouldContinue { + break + } + + if err := batch.CreateRelationshipByIDs(nextPost.FromID, nextPost.ToID, nextPost.Kind, relProperties); err != nil { + return err + } + + stats.AddRelationshipsCreated(nextPost.Kind, 1) + } + + return nil + }); err != nil { + slog.Error("Write Computer CanRDP Post Processed Edge", attr.Error(err)) + done() + } + }() + + for workerID := 0; workerID < runtime.NumCPU()/2+1; workerID++ { + computerWG.Add(1) + + go func(workerID int) { + defer computerWG.Done() + + // Status output function for measuring progress in this post processing section + submitStatusf := util.SLogSampleRepeated("PostCanRDP") + + for { + nextComputerRDPJob, shouldContinue := channels.Receive(ctx, computerC) + + if !shouldContinue { + break + } + + rdpEntities, err := FetchCanRDPEntityBitmapForComputer(nextComputerRDPJob, enforceURA, citrixEnabled) + + if err != nil { + slog.Error("FetchCanRDPEntityBitmapForComputer Error", attr.Error(err)) + done() + } else { + rdpEntities.Each(func(fromID uint64) bool { + return channels.Submit(ctx, postC, analysis.CreatePostRelationshipJob{ + FromID: graph.ID(fromID), + ToID: nextComputerRDPJob.Computer, + Kind: ad.CanRDP, + }) + }) + } + + if numComputersProcessed.Add(1)%10_000 == 0 { + cacheStats := nextComputerRDPJob.GroupMembershipCache.Stats() + + submitStatusf( + slog.Uint64("num_computers", numComputersProcessed.Load()), + slog.Uint64("num_cached", cacheStats.Cached), + slog.Uint64("cache_hits", cacheStats.Hits), + ) + } + } + }(workerID) + } + + for workerID := 0; workerID < analysis.MaximumDatabaseParallelWorkers; workerID++ { + workerWG.Add(1) + + go func(workerID int) { + defer workerWG.Done() + + if err := graphDB.ReadTransaction(ctx, func(tx graph.Transaction) error { + for { + var ( + nextComputer, shouldContinue = channels.Receive(ctx, workC) + nextComputerID = graph.ID(nextComputer) + ) + + if 
!shouldContinue { + break + } + + if computerCanRDPData, err := canRDPData.FetchCanRDPComputerData(tx, nextComputerID); err != nil { + return err + } else if !channels.Submit(ctx, computerC, computerCanRDPData) { + break + } + } + + return nil + }); err != nil { + slog.Error("Fetching CanRDP Computer Data", attr.Error(err)) + done() + } + }(workerID) + } + + localGroupData.Computers.Each(func(nextComputer uint64) bool { + return channels.Submit(ctx, workC, nextComputer) + }) + + close(workC) + workerWG.Wait() + + close(computerC) + computerWG.Wait() + + close(postC) + postWG.Wait() + + return &stats, nil +} + +func PostLocalGroups(parentCtx context.Context, graphDB graph.Database, localGroupData *LocalGroupData) (*analysis.AtomicPostProcessingStats, error) { + const ( + adminGroupSuffix = "-544" + psRemoteGroupSuffix = "-580" + dcomGroupSuffix = "-562" + ) + + type reachJob struct { + targetComputer uint64 + targetGroup uint64 + groupSuffix string + } + + var ( + ctx, done = context.WithCancel(parentCtx) + stats = analysis.NewAtomicPostProcessingStats() + computerC = make(chan uint64) + reachC = make(chan reachJob, 4096) + postC = make(chan analysis.CreatePostRelationshipJob, 4096) + numGroupsProcessed = &atomic.Uint64{} + numComputersProcessed = &atomic.Uint64{} + submitStatusf = util.SLogSampleRepeated("PostLocalGroups") + + postWG sync.WaitGroup + reachWG sync.WaitGroup + fetchWG sync.WaitGroup + ) + + // Ensure the internal operation context is closed out + defer done() + defer measure.ContextMeasure(ctx, slog.LevelInfo, "PostLocalGroups")() + + postWG.Add(1) + + go func() { + defer postWG.Done() + + relProperties := analysis.NewPropertiesWithLastSeen() + + if err := graphDB.BatchOperation(ctx, func(batch graph.Batch) error { + for { + nextPost, shouldContinue := channels.Receive(ctx, postC) + + if !shouldContinue { + break + } + + if err := batch.CreateRelationshipByIDs(nextPost.FromID, nextPost.ToID, nextPost.Kind, relProperties); err != nil { + return err + } + + stats.AddRelationshipsCreated(nextPost.Kind, 1) + } + + return nil + }); err != nil { + slog.Error("Write Computer Local Group Post Processed Edge", attr.Error(err)) + done() + } + }() + + // Graph path workers + for workerID := 0; workerID < runtime.NumCPU()/2+1; workerID += 1 { + reachWG.Add(1) + + go func(workerID int) { + defer reachWG.Done() + + for { + var ( + nextJob, shouldContinue = channels.Receive(ctx, reachC) + edgeKind graph.Kind + ) + + if !shouldContinue { + break + } + + switch nextJob.groupSuffix { + case adminGroupSuffix: + edgeKind = ad.AdminTo + case psRemoteGroupSuffix: + edgeKind = ad.CanPSRemote + case dcomGroupSuffix: + edgeKind = ad.ExecuteDCOM + default: + continue + } + + localGroupData.LocalGroupMembershipDigraph.EachAdjacentNode(nextJob.targetGroup, graph.DirectionInbound, func(fromID uint64) bool { + return channels.Submit(ctx, postC, analysis.CreatePostRelationshipJob{ + FromID: graph.ID(fromID), + ToID: graph.ID(nextJob.targetComputer), + Kind: edgeKind, + }) + }) + } + }(workerID) + } + + for workerID := 0; workerID < analysis.MaximumDatabaseParallelWorkers; workerID += 1 { + fetchWG.Add(1) + + go func(workerID int) { + defer fetchWG.Done() + + if err := graphDB.ReadTransaction(ctx, func(tx graph.Transaction) error { + for { + computerID, shouldContinue := channels.Receive(ctx, computerC) + + if !shouldContinue { + break + } + + if localAdminGroup, err := FetchComputerLocalGroupIDBySIDSuffix(tx, graph.ID(computerID), adminGroupSuffix); err != nil { + if !graph.IsErrNotFound(err) { + return 
err + } + } else { + numGroupsProcessed.Add(1) + + channels.Submit(ctx, reachC, reachJob{ + targetComputer: computerID, + targetGroup: localAdminGroup.Uint64(), + groupSuffix: adminGroupSuffix, + }) + } + + if localPSRemoteGroup, err := FetchComputerLocalGroupIDBySIDSuffix(tx, graph.ID(computerID), psRemoteGroupSuffix); err != nil { + if !graph.IsErrNotFound(err) { + return err + } + } else { + numGroupsProcessed.Add(1) + + channels.Submit(ctx, reachC, reachJob{ + targetComputer: computerID, + targetGroup: localPSRemoteGroup.Uint64(), + groupSuffix: psRemoteGroupSuffix, + }) + } + + if localDCOMGroup, err := FetchComputerLocalGroupIDBySIDSuffix(tx, graph.ID(computerID), dcomGroupSuffix); err != nil { + if !graph.IsErrNotFound(err) { + return err + } + } else { + numGroupsProcessed.Add(1) + + channels.Submit(ctx, reachC, reachJob{ + targetComputer: computerID, + targetGroup: localDCOMGroup.Uint64(), + groupSuffix: dcomGroupSuffix, + }) + } + + if numComputersProcessed.Add(1)%10000 == 0 { + submitStatusf(slog.Uint64("num_computers", numComputersProcessed.Load())) + } + } + + return nil + }); err != nil { + slog.Error("Read Computer Local Groups", attr.Error(err)) + done() + } + }(workerID) + } + + localGroupData.Computers.Each(func(value uint64) bool { + return channels.Submit(ctx, computerC, value) + }) + + close(computerC) + fetchWG.Wait() + + close(reachC) + reachWG.Wait() + + close(postC) + postWG.Wait() + + return &stats, nil +} diff --git a/packages/go/analysis/ad/membership.go b/packages/go/analysis/ad/membership.go index 20275d22dd7..32bed6c440d 100644 --- a/packages/go/analysis/ad/membership.go +++ b/packages/go/analysis/ad/membership.go @@ -19,100 +19,14 @@ package ad import ( "context" "fmt" - "log/slog" "github.com/specterops/bloodhound/packages/go/analysis" - "github.com/specterops/bloodhound/packages/go/analysis/impact" - "github.com/specterops/bloodhound/packages/go/bhlog/measure" - "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" - "github.com/specterops/dawgs/ops" "github.com/specterops/dawgs/query" "github.com/specterops/dawgs/traversal" ) -func ResolveAllGroupMemberships(ctx context.Context, db graph.Database, additionalCriteria ...graph.Criteria) (impact.PathAggregator, error) { - defer measure.ContextMeasure(ctx, slog.LevelInfo, "ResolveAllGroupMemberships")() - - var ( - adGroupIDs []graph.ID - - searchCriteria = []graph.Criteria{query.KindIn(query.Relationship(), ad.MemberOf, ad.MemberOfLocalGroup)} - traversalMap = cardinality.ThreadSafeDuplex(cardinality.NewBitmap64()) - traversalInst = traversal.New(db, analysis.MaximumDatabaseParallelWorkers) - memberships = impact.NewThreadSafeAggregator(impact.NewAggregator(func() cardinality.Provider[uint64] { - return cardinality.NewBitmap64() - })) - ) - - if len(additionalCriteria) > 0 { - searchCriteria = append(searchCriteria, additionalCriteria...) 
- } - - if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { - if fetchedGroups, err := ops.FetchNodeIDs(tx.Nodes().Filter( - query.KindIn(query.Node(), ad.Group, ad.LocalGroup), - )); err != nil { - return err - } else { - adGroupIDs = fetchedGroups - return nil - } - }); err != nil { - return memberships, err - } - - slog.InfoContext(ctx, fmt.Sprintf("Collected %d groups to resolve", len(adGroupIDs))) - - for _, adGroupID := range adGroupIDs { - if traversalMap.Contains(adGroupID.Uint64()) { - continue - } - - if err := traversalInst.BreadthFirst(ctx, traversal.Plan{ - Root: graph.NewNode(adGroupID, graph.NewProperties(), ad.Entity, ad.Group), - Driver: func(ctx context.Context, tx graph.Transaction, segment *graph.PathSegment) ([]*graph.PathSegment, error) { - if nextQuery, err := newTraversalQuery(tx, segment, graph.DirectionInbound, searchCriteria...); err != nil { - return nil, err - } else { - var nextSegments []*graph.PathSegment - - if err := nextQuery.FetchTriples(func(cursor graph.Cursor[graph.RelationshipTripleResult]) error { - for next := range cursor.Chan() { - nextSegment := segment.Descend( - graph.NewNode(next.StartID, graph.NewProperties()), - graph.NewRelationship(next.ID, next.StartID, next.StartID, graph.NewProperties(), ad.MemberOf), - ) - - if traversalMap.CheckedAdd(next.StartID.Uint64()) { - nextSegments = append(nextSegments, nextSegment) - } else { - memberships.AddShortcut(nextSegment) - } - } - - return cursor.Error() - }); err != nil { - return nil, err - } - - // Is this path terminal? - if len(nextSegments) == 0 { - memberships.AddPath(segment) - } - - return nextSegments, nil - } - }, - }); err != nil { - return nil, err - } - } - - return memberships, nil -} - func newTraversalQuery(tx graph.Transaction, segment *graph.PathSegment, direction graph.Direction, queryCriteria ...graph.Criteria) (graph.RelationshipQuery, error) { var ( traversalCriteria []graph.Criteria @@ -172,13 +86,11 @@ func FetchPathMembers(ctx context.Context, db graph.Database, root graph.ID, dir Driver: func(ctx context.Context, tx graph.Transaction, segment *graph.PathSegment) ([]*graph.PathSegment, error) { if nextQuery, err := newTraversalQuery(tx, segment, direction, queryCriteria...); err != nil { return nil, err - } else if reverseDirection, err := direction.Reverse(); err != nil { - return nil, err } else { var nextSegments []*graph.PathSegment return nextSegments, nextQuery.FetchDirection( - reverseDirection, + direction.Reverse(), func(cursor graph.Cursor[graph.DirectionalResult]) error { for next := range cursor.Chan() { nextSegment := segment.Descend(next.Node, next.Relationship) diff --git a/packages/go/analysis/ad/ntlm.go b/packages/go/analysis/ad/ntlm.go index 312962cbd50..33bbf79b9c7 100644 --- a/packages/go/analysis/ad/ntlm.go +++ b/packages/go/analysis/ad/ntlm.go @@ -28,7 +28,6 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis" "github.com/specterops/bloodhound/packages/go/analysis/ad/wellknown" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/graphschema/common" @@ -43,7 +42,7 @@ type NTLMCache struct { ProtectedUsersCache map[string]cardinality.Duplex[uint64] LdapCache map[string]LDAPSigningCache UnprotectedComputersCache cardinality.Duplex[uint64] - GroupExpansions impact.PathAggregator + LocalGroupData *LocalGroupData } func (s NTLMCache) 
GetAuthenticatedUserGroupForDomain(domainSid string) (graph.ID, bool) { @@ -61,7 +60,7 @@ func (s NTLMCache) GetLdapCacheForDomain(domainSid string) (LDAPSigningCache, bo return cache, ok } -func NewNTLMCache(ctx context.Context, db graph.Database, groupExpansions impact.PathAggregator) (NTLMCache, error) { +func NewNTLMCache(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (NTLMCache, error) { var ( ntlmCache = NTLMCache{} unprotectedComputerCache = make(map[string]cardinality.Duplex[uint64]) @@ -72,7 +71,7 @@ func NewNTLMCache(ctx context.Context, db graph.Database, groupExpansions impact // Fetch all nodes where the node is a Group and is an Authenticated User if innerAuthenticatedUsersCache, err := FetchAuthUsersMappedToDomains(tx); err != nil { return err - } else if innerProtectedUsersCache, err := FetchProtectedUsersMappedToDomains(ctx, db, groupExpansions); err != nil { + } else if innerProtectedUsersCache, err := FetchProtectedUsersMappedToDomains(ctx, db, localGroupData); err != nil { return err } else if ldapSigningCache, err := FetchLDAPSigningCache(ctx, db); err != nil { return err @@ -80,7 +79,7 @@ func NewNTLMCache(ctx context.Context, db graph.Database, groupExpansions impact ntlmCache.AuthenticatedUsersCache = innerAuthenticatedUsersCache ntlmCache.LdapCache = ldapSigningCache ntlmCache.ProtectedUsersCache = innerProtectedUsersCache - ntlmCache.GroupExpansions = groupExpansions + ntlmCache.LocalGroupData = localGroupData // Fetch all nodes where the type is Computer and build out a cache of computers that are acceptable target/victims for coercion return tx.Nodes().Filter(query.Kind(query.Node(), ad.Computer)).Fetch(func(cursor graph.Cursor[*graph.Node]) error { @@ -125,7 +124,7 @@ func NewNTLMCache(ctx context.Context, db graph.Database, groupExpansions impact } // PostNTLM is the initial function used to execute our NTLM analysis -func PostNTLM(ctx context.Context, db graph.Database, groupExpansions impact.PathAggregator, adcsCache ADCSCache, ntlmEnabled bool, compositionCounter *analysis.CompositionCounter) (*analysis.AtomicPostProcessingStats, error) { +func PostNTLM(ctx context.Context, db graph.Database, localGroupData *LocalGroupData, adcsCache ADCSCache, ntlmEnabled bool, compositionCounter *analysis.CompositionCounter) (*analysis.AtomicPostProcessingStats, error) { var ( operation = analysis.NewPostRelationshipOperation(ctx, db, "PostNTLM") // compositionChannel = make(chan analysis.CompositionInfo) @@ -163,7 +162,7 @@ func PostNTLM(ctx context.Context, db graph.Database, groupExpansions impact.Pat // TODO: after adding all of our new NTLM edges, benchmark performance between submitting multiple readers per computer or single reader per computer // First fetch pre-reqs + find all vulnerable computers that are not protected - if ntlmCache, err := NewNTLMCache(ctx, db, groupExpansions); err != nil { + if ntlmCache, err := NewNTLMCache(ctx, db, localGroupData); err != nil { operation.Done() return nil, err } else if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { @@ -440,8 +439,8 @@ func PostCoerceAndRelayNTLMToADCS(adcsCache ADCSCache, operation analysis.StatTr } else { // Find all enrollers with enrollment rights on the cert template and the enterprise CA (no shortcutting) var ( - templateBitmap = expandNodeSliceToBitmapWithoutGroups(certTemplateEnrollers, ntlmCache.GroupExpansions) - ecaBitmap = expandNodeSliceToBitmapWithoutGroups(ecaEnrollers, ntlmCache.GroupExpansions) + templateBitmap = 
expandNodeSliceToBitmapWithoutGroups(certTemplateEnrollers, ntlmCache.LocalGroupData) + ecaBitmap = expandNodeSliceToBitmapWithoutGroups(ecaEnrollers, ntlmCache.LocalGroupData) enrollersBitmap = cardinality.NewBitmap64() specialGroupHasECAEnroll = adcsCache.GetEnterpriseCAHasSpecialEnrollers(enterpriseCA.ID) specialGroupHasTemplateEnroll = adcsCache.GetCertTemplateHasSpecialEnrollers(certTemplate.ID) @@ -586,7 +585,7 @@ func PostCoerceAndRelayNTLMToSMB(tx graph.Transaction, outC chan<- analysis.Crea allAdminPrincipals := cardinality.NewBitmap64() for _, principal := range firstDegreeAdmins.Slice() { if principal.Kinds.ContainsOneOf(ad.Group) { - allAdminPrincipals.Or(ntlmCache.GroupExpansions.Cardinality(principal.ID.Uint64())) + ntlmCache.LocalGroupData.GroupMembershipCache.OrReach(principal.ID.Uint64(), graph.DirectionInbound, allAdminPrincipals) } else { allAdminPrincipals.Add(principal.ID.Uint64()) } @@ -853,7 +852,7 @@ func FetchAuthUsersMappedToDomains(tx graph.Transaction) (map[string]graph.ID, e } // FetchProtectedUsersMappedToDomains fetches all protected users groups mapped by their domain SID -func FetchProtectedUsersMappedToDomains(ctx context.Context, db graph.Database, groupExpansions impact.PathAggregator) (map[string]cardinality.Duplex[uint64], error) { +func FetchProtectedUsersMappedToDomains(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (map[string]cardinality.Duplex[uint64], error) { protectedUsers := make(map[string]cardinality.Duplex[uint64]) err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { @@ -867,7 +866,7 @@ func FetchProtectedUsersMappedToDomains(ctx context.Context, db graph.Database, continue } else { set := cardinality.NewBitmap64() - set.Or(groupExpansions.Cardinality(protectedUserGroup.ID.Uint64())) + localGroupData.GroupMembershipCache.OrReach(protectedUserGroup.ID.Uint64(), graph.DirectionInbound, set) protectedUsers[domain] = set } } diff --git a/packages/go/analysis/ad/owns.go b/packages/go/analysis/ad/owns.go index 0127e536439..31dbd5c9ca9 100644 --- a/packages/go/analysis/ad/owns.go +++ b/packages/go/analysis/ad/owns.go @@ -21,11 +21,11 @@ import ( "errors" "log/slog" + "github.com/specterops/dawgs/algo" "github.com/specterops/dawgs/util/channels" "github.com/specterops/bloodhound/packages/go/analysis" "github.com/specterops/bloodhound/packages/go/analysis/ad/wellknown" - "github.com/specterops/bloodhound/packages/go/analysis/impact" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/graphschema/common" @@ -35,14 +35,14 @@ import ( "github.com/specterops/dawgs/query" ) -func PostOwnsAndWriteOwner(ctx context.Context, db graph.Database, groupExpansions impact.PathAggregator) (*analysis.AtomicPostProcessingStats, error) { +func PostOwnsAndWriteOwner(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (*analysis.AtomicPostProcessingStats, error) { operation := analysis.NewPostRelationshipOperation(ctx, db, "PostOwnsAndWriteOwner") // Get the dSHeuristics values for all domains if dsHeuristicsCache, anyEnforced, err := GetDsHeuristicsCache(ctx, db); err != nil { slog.ErrorContext(ctx, "Failed fetching dsheuristics values for postownsandwriteowner", attr.Error(err)) return nil, err - } else if adminGroupIds, err := FetchAdminGroupIds(ctx, db, groupExpansions); err != nil { + } else if adminGroupIds, err := FetchAdminGroupIds(ctx, db, localGroupData.GroupMembershipCache); err != nil { 
// Get the admin group IDs slog.ErrorContext(ctx, "Failed fetching admin group ids values for postownsandwriteowner", attr.Error(err)) } else { @@ -193,7 +193,7 @@ func isTargetNodeComputerDerived(node *graph.Node) (bool, error) { } } -func FetchAdminGroupIds(ctx context.Context, db graph.Database, groupExpansions impact.PathAggregator) (cardinality.Duplex[uint64], error) { +func FetchAdminGroupIds(ctx context.Context, db graph.Database, groupExpansions *algo.ReachabilityCache) (cardinality.Duplex[uint64], error) { adminIds := cardinality.NewBitmap64() return adminIds, db.ReadTransaction(ctx, func(tx graph.Transaction) error { @@ -205,7 +205,7 @@ func FetchAdminGroupIds(ctx context.Context, db graph.Database, groupExpansions ).FetchIDs(func(cursor graph.Cursor[graph.ID]) error { for id := range cursor.Chan() { adminIds.Add(id.Uint64()) - adminIds.Or(groupExpansions.Cardinality(id.Uint64())) + groupExpansions.OrReach(id.Uint64(), graph.DirectionInbound, adminIds) } return cursor.Error() diff --git a/packages/go/analysis/ad/post.go b/packages/go/analysis/ad/post.go index 3812a43b7e4..00d6cf30476 100644 --- a/packages/go/analysis/ad/post.go +++ b/packages/go/analysis/ad/post.go @@ -21,20 +21,21 @@ import ( "fmt" "log/slog" - "github.com/RoaringBitmap/roaring/v2/roaring64" "github.com/specterops/bloodhound/packages/go/analysis" "github.com/specterops/bloodhound/packages/go/analysis/ad/wellknown" - "github.com/specterops/bloodhound/packages/go/analysis/impact" + "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/graphschema/common" + "github.com/specterops/dawgs/algo" "github.com/specterops/dawgs/cardinality" + "github.com/specterops/dawgs/container" "github.com/specterops/dawgs/graph" "github.com/specterops/dawgs/ops" "github.com/specterops/dawgs/query" "github.com/specterops/dawgs/util/channels" ) -func PostSyncLAPSPassword(ctx context.Context, db graph.Database, groupExpansions impact.PathAggregator) (*analysis.AtomicPostProcessingStats, error) { +func PostSyncLAPSPassword(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (*analysis.AtomicPostProcessingStats, error) { if domainNodes, err := fetchCollectedDomainNodes(ctx, db); err != nil { return &analysis.AtomicPostProcessingStats{}, err } else { @@ -42,7 +43,7 @@ func PostSyncLAPSPassword(ctx context.Context, db graph.Database, groupExpansion for _, domain := range domainNodes { innerDomain := domain operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if lapsSyncers, err := getLAPSSyncers(tx, innerDomain, groupExpansions); err != nil { + if lapsSyncers, err := getLAPSSyncers(tx, innerDomain, localGroupData); err != nil { return err } else if lapsSyncers.Cardinality() == 0 { return nil @@ -69,7 +70,7 @@ func PostSyncLAPSPassword(ctx context.Context, db graph.Database, groupExpansion } } -func PostDCSync(ctx context.Context, db graph.Database, groupExpansions impact.PathAggregator) (*analysis.AtomicPostProcessingStats, error) { +func PostDCSync(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (*analysis.AtomicPostProcessingStats, error) { if domainNodes, err := fetchCollectedDomainNodes(ctx, db); err != nil { return &analysis.AtomicPostProcessingStats{}, err } else { @@ -78,7 +79,7 @@ func PostDCSync(ctx context.Context, db graph.Database, groupExpansions impact.P for _, domain := range 
domainNodes { innerDomain := domain operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if dcSyncers, err := getDCSyncers(tx, innerDomain, groupExpansions); err != nil { + if dcSyncers, err := getDCSyncers(tx, innerDomain, localGroupData); err != nil { return err } else if dcSyncers.Cardinality() == 0 { return nil @@ -182,13 +183,38 @@ func PostHasTrustKeys(ctx context.Context, db graph.Database) (*analysis.AtomicP } } -func FetchComputers(ctx context.Context, db graph.Database) (*roaring64.Bitmap, error) { - computerNodeIds := roaring64.NewBitmap() +// FetchNodeIDsByKind fetches a bitmap of node IDs where each node has at least one kind assignment +// that matches the given kind. +func FetchNodeIDsByKind(tx graph.Transaction, targetKind graph.Kind) (cardinality.Duplex[uint64], error) { + defer measure.LogAndMeasure(slog.LevelInfo, "FetchNodeIDsByKind", slog.String("kind", targetKind.String()))() + + nodes := cardinality.NewBitmap64() + + if err := tx.Nodes().Filterf(func() graph.Criteria { + return query.Kind(query.Node(), targetKind) + }).FetchIDs(func(cursor graph.Cursor[graph.ID]) error { + for id := range cursor.Chan() { + nodes.Add(id.Uint64()) + } + + return cursor.Error() + }); err != nil { + return nil, err + } + + return nodes, nil +} + +func FetchAdminGroups(ctx context.Context, db graph.Database) (cardinality.Duplex[uint64], error) { + computerNodeIds := cardinality.NewBitmap64() return computerNodeIds, db.ReadTransaction(ctx, func(tx graph.Transaction) error { - return tx.Nodes().Filterf(func() graph.Criteria { - return query.Kind(query.Node(), ad.Computer) - }).FetchIDs(func(cursor graph.Cursor[graph.ID]) error { + return tx.Nodes().Filter(query.And( + query.Or( + query.StringEndsWith(query.StartProperty(common.ObjectID.String()), wellknown.AdministratorsSIDSuffix.String()), + query.StringEndsWith(query.EndProperty(common.ObjectID.String()), wellknown.AdministratorsSIDSuffix.String()), + ), + )).FetchIDs(func(cursor graph.Cursor[graph.ID]) error { for id := range cursor.Chan() { computerNodeIds.Add(id.Uint64()) } @@ -257,7 +283,8 @@ func getTrustAccount(tx graph.Transaction, domainSid, netbios string) (*graph.No } return nodes[0], err } -func getLAPSSyncers(tx graph.Transaction, domain *graph.Node, groupExpansions impact.PathAggregator) (cardinality.Duplex[uint64], error) { + +func getLAPSSyncers(tx graph.Transaction, domain *graph.Node, localGroupData *LocalGroupData) (cardinality.Duplex[uint64], error) { var ( getChangesQuery = analysis.FromEntityToEntityWithRelationshipKind(tx, domain, ad.GetChanges) getChangesFilteredQuery = analysis.FromEntityToEntityWithRelationshipKind(tx, domain, ad.GetChangesInFilteredSet) @@ -268,13 +295,13 @@ func getLAPSSyncers(tx graph.Transaction, domain *graph.Node, groupExpansions im } else if getChangesFilteredNodes, err := ops.FetchStartNodes(getChangesFilteredQuery); err != nil { return nil, err } else { - results := CalculateCrossProductNodeSets(tx, groupExpansions, getChangesNodes.Slice(), getChangesFilteredNodes.Slice()) + results := CalculateCrossProductNodeSets(localGroupData, getChangesNodes.Slice(), getChangesFilteredNodes.Slice()) return results, nil } } -func getDCSyncers(tx graph.Transaction, domain *graph.Node, groupExpansions impact.PathAggregator) (cardinality.Duplex[uint64], error) { +func getDCSyncers(tx graph.Transaction, domain *graph.Node, localGroupData *LocalGroupData) (cardinality.Duplex[uint64], error) { var ( getChangesQuery = 
analysis.FromEntityToEntityWithRelationshipKind(tx, domain, ad.GetChanges) getChangesAllQuery = analysis.FromEntityToEntityWithRelationshipKind(tx, domain, ad.GetChangesAll) @@ -285,7 +312,7 @@ func getDCSyncers(tx graph.Transaction, domain *graph.Node, groupExpansions impa } else if getChangesAllNodes, err := ops.FetchStartNodes(getChangesAllQuery); err != nil { return nil, err } else { - results := CalculateCrossProductNodeSets(tx, groupExpansions, getChangesNodes.Slice(), getChangesAllNodes.Slice()) + results := CalculateCrossProductNodeSets(localGroupData, getChangesNodes.Slice(), getChangesAllNodes.Slice()) return results, nil } @@ -335,163 +362,79 @@ func getAdminSDHolderProtected(tx graph.Transaction, domain *graph.Node) ([]grap } } -func PostLocalGroups(ctx context.Context, db graph.Database, localGroupExpansions impact.PathAggregator, enforceURA bool, citrixEnabled bool) (*analysis.AtomicPostProcessingStats, error) { +// Fetches a LocalGroup belonging to the given computer by the given LocalGroup SID suffix. +func FetchComputerLocalGroupBySIDSuffix(tx graph.Transaction, computer graph.ID, groupSuffix string) (*graph.Node, error) { var ( - adminGroupSuffix = "-544" - psRemoteGroupSuffix = "-580" - dcomGroupSuffix = "-562" - ) - - if computers, err := FetchComputers(ctx, db); err != nil { - return &analysis.AtomicPostProcessingStats{}, err - } else { - var ( - threadSafeLocalGroupExpansions = impact.NewThreadSafeAggregator(localGroupExpansions) - operation = analysis.NewPostRelationshipOperation(ctx, db, "LocalGroup Post Processing") - ) - - for idx, computer := range computers.ToArray() { - computerID := graph.ID(computer) - - if idx > 0 && idx%10000 == 0 { - slog.InfoContext(ctx, fmt.Sprintf("Post processed %d active directory computers", idx)) - } - - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if entities, err := FetchLocalGroupBitmapForComputer(tx, computerID, dcomGroupSuffix); err != nil { - return err - } else { - for _, admin := range entities.Slice() { - nextJob := analysis.CreatePostRelationshipJob{ - FromID: graph.ID(admin), - ToID: computerID, - Kind: ad.ExecuteDCOM, - } + groupNode graph.Node + err = tx.Relationships().Filter(query.And( + query.StringEndsWith(query.StartProperty(common.ObjectID.String()), groupSuffix), + query.Kind(query.Relationship(), ad.LocalToComputer), + query.InIDs(query.EndID(), computer), + )).Query( + func(results graph.Result) error { + defer results.Close() - if !channels.Submit(ctx, outC, nextJob) { - return nil - } + if results.Next() { + if err := results.Scan(&groupNode); err != nil { + return err } - - return nil - } - }); err != nil { - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed submitting reader for operation involving computer %d: %w", computerID, err) - } - - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if entities, err := FetchLocalGroupBitmapForComputer(tx, computerID, psRemoteGroupSuffix); err != nil { - return err } else { - for _, admin := range entities.Slice() { - nextJob := analysis.CreatePostRelationshipJob{ - FromID: graph.ID(admin), - ToID: computerID, - Kind: ad.CanPSRemote, - } - - if !channels.Submit(ctx, outC, nextJob) { - return nil - } - } - - return nil + return graph.ErrNoResultsFound } - }); err != nil { - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed submitting reader 
for operation involving computer %d: %w", computerID, err) - } - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if entities, err := FetchLocalGroupBitmapForComputer(tx, computerID, adminGroupSuffix); err != nil { - return err - } else { - for _, admin := range entities.Slice() { - nextJob := analysis.CreatePostRelationshipJob{ - FromID: graph.ID(admin), - ToID: computerID, - Kind: ad.AdminTo, - } + return results.Error() + }, + query.Returning( + query.Start(), + ), + ) + ) - if !channels.Submit(ctx, outC, nextJob) { - return nil - } - } + if err != nil { + return nil, err + } - return nil - } - }); err != nil { - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed submitting reader for operation involving computer %d: %w", computerID, err) - } + return &groupNode, nil +} - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if entities, err := FetchCanRDPEntityBitmapForComputer(tx, computerID, threadSafeLocalGroupExpansions, enforceURA, citrixEnabled); err != nil { - return err - } else { - for _, rdp := range entities.Slice() { - nextJob := analysis.CreatePostRelationshipJob{ - FromID: graph.ID(rdp), - ToID: computerID, - Kind: ad.CanRDP, - } +// FetchComputerLocalGroupIDBySIDSuffix fetches a local group attached to the given computer with a SID suffix that matches +// the given suffix. +func FetchComputerLocalGroupIDBySIDSuffix(tx graph.Transaction, computer graph.ID, groupSuffix string) (graph.ID, error) { + var ( + startID graph.ID + err = tx.Relationships().Filter(query.And( + query.StringEndsWith(query.StartProperty(common.ObjectID.String()), groupSuffix), + query.Kind(query.Relationship(), ad.LocalToComputer), + query.InIDs(query.EndID(), computer), + )).Query( + func(results graph.Result) error { + defer results.Close() - if !channels.Submit(ctx, outC, nextJob) { - return nil - } + if results.Next() { + if err := results.Scan(&startID); err != nil { + return err } + } else { + return graph.ErrNoResultsFound } - return nil - }); err != nil { - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed submitting reader for operation involving computer %d: %w", computerID, err) - } - } - - slog.InfoContext(ctx, fmt.Sprintf("Finished post-processing %d active directory computers", computers.GetCardinality())) - return &operation.Stats, operation.Done() - } -} - -func ExpandLocalGroupMembership(tx graph.Transaction, candidates graph.NodeSet) (graph.NodeSet, error) { - if paths, err := ExpandLocalGroupMembershipPaths(tx, candidates); err != nil { - return nil, err - } else { - return paths.AllNodes(), nil - } -} - -func ExpandLocalGroupMembershipPaths(tx graph.Transaction, candidates graph.NodeSet) (graph.PathSet, error) { - groupMemberPaths := graph.NewPathSet() - - for _, candidate := range candidates { - if candidate.Kinds.ContainsOneOf(ad.Group) { - if membershipPaths, err := ops.TraversePaths(tx, ops.TraversalPlan{ - Root: candidate, - Direction: graph.DirectionInbound, - BranchQuery: func() graph.Criteria { - return query.KindIn(query.Relationship(), ad.MemberOf, ad.MemberOfLocalGroup) - }, - }); err != nil { - return nil, err - } else { - groupMemberPaths.AddPathSet(membershipPaths) - } - } - } - - return groupMemberPaths, nil -} + return results.Error() + }, + query.Returning( + query.StartID(), + ), + ) + ) -func Uint64ToIDSlice(uint64IDs []uint64) []graph.ID { - 
ids := make([]graph.ID, len(uint64IDs)) - for idx := 0; idx < len(uint64IDs); idx++ { - ids[idx] = graph.ID(uint64IDs[idx]) + if err != nil { + return 0, err } - return ids + return startID, nil } -func ExpandGroupMembershipIDBitmap(tx graph.Transaction, group *graph.Node) (*roaring64.Bitmap, error) { - groupMembers := roaring64.NewBitmap() +func ExpandGroupMembershipIDBitmap(tx graph.Transaction, group *graph.Node) (cardinality.Duplex[uint64], error) { + groupMembers := cardinality.NewBitmap64() if membershipPaths, err := ops.TraversePaths(tx, ops.TraversalPlan{ Root: group, @@ -510,18 +453,7 @@ func ExpandGroupMembershipIDBitmap(tx graph.Transaction, group *graph.Node) (*ro return groupMembers, nil } -func FetchComputerLocalGroupBySIDSuffix(tx graph.Transaction, computer graph.ID, groupSuffix string) (*graph.Node, error) { - if rel, err := tx.Relationships().Filter(query.And( - query.StringEndsWith(query.StartProperty(common.ObjectID.String()), groupSuffix), - query.Kind(query.Relationship(), ad.LocalToComputer), - query.InIDs(query.EndID(), computer), - )).First(); err != nil { - return nil, err - } else { - return ops.FetchNode(tx, rel.StartID) - } -} - +// FetchComputerLocalGroupByName looks up a local group attached to a given computer by its name. func FetchComputerLocalGroupByName(tx graph.Transaction, computer graph.ID, groupName string) (*graph.Node, error) { if rel, err := tx.Relationships().Filter( query.And( @@ -537,18 +469,21 @@ func FetchComputerLocalGroupByName(tx graph.Transaction, computer graph.ID, grou } } -func FetchLocalGroupMembership(tx graph.Transaction, computer graph.ID, groupSuffix string) (graph.NodeSet, error) { - if localGroup, err := FetchComputerLocalGroupBySIDSuffix(tx, computer, groupSuffix); err != nil { - return nil, err - } else { - return ops.FetchStartNodes(tx.Relationships().Filter(query.And( - query.KindIn(query.Start(), ad.User, ad.Group, ad.Computer), - query.Kind(query.Relationship(), ad.MemberOfLocalGroup), - query.InIDs(query.EndID(), localGroup.ID), - ))) - } +// FetchRemoteDesktopUsersBitmapForComputerWithoutURA uses the cached local group information in the passed CanRDPComputerData +// struct to compute the membership of the computer's "Remote Desktop Users" local group. This membership is returned +// as a bitmap. +func FetchRemoteDesktopUsersBitmapForComputerWithoutURA(canRDPData *CanRDPComputerData) cardinality.Duplex[uint64] { + adjacentNodes := container.AdjacentNodes( + canRDPData.LocalGroupMembershipDigraph, + canRDPData.RemoteDesktopUsersLocalGroup.ID.Uint64(), + graph.DirectionInbound, + ) + + return cardinality.NewBitmap64With(adjacentNodes...) } +// FetchRemoteInteractiveLogonRightEntities fetches all entities with RemoteInteractiveLogonRight to a given computer and returns the +// nodes as a set.
+// FetchRemoteInteractiveLogonRightEntities expands all RemoteInteractiveLogonRight edges inbound to a given computer and
+// returns the start nodes as a set.
 func FetchRemoteInteractiveLogonRightEntities(tx graph.Transaction, computerId graph.ID) (graph.NodeSet, error) {
     return ops.FetchStartNodes(tx.Relationships().Filterf(func() graph.Criteria {
         return query.And(
@@ -558,133 +493,279 @@ func FetchRemoteInteractiveLogonRightEntities(tx graph.Transaction, computerId g
     }))
 }

-func HasRemoteInteractiveLogonRight(tx graph.Transaction, groupId, computerId graph.ID) bool {
-    if _, err := tx.Relationships().Filterf(func() graph.Criteria {
-        return query.And(
-            query.Equals(query.StartID(), groupId),
-            query.Equals(query.EndID(), computerId),
-            query.Kind(query.Relationship(), ad.RemoteInteractiveLogonRight),
-        )
-    }).First(); err != nil {
-        return false
-    }
+// FetchCanRDPEntityBitmapForComputer computes the nodes eligible for a CanRDP edge (aggregated into a bitmap) for the
+// computer referenced by the passed CanRDPComputerData struct.
+func FetchCanRDPEntityBitmapForComputer(computerData *CanRDPComputerData, enforceURA bool, citrixEnabled bool) (cardinality.Duplex[uint64], error) {
+    var (
+        uraEnabled = enforceURA || computerData.ComputersWithURA.Contains(computerData.Computer.Uint64())
-    return true
-}
+
+        // Shortcut opportunity when citrix is disabled: see if the RDP group has the RIL privilege. If
+        // it does, get the first-degree members and return those IDs, since everything in the RDP group
+        // has CanRDP privs. No reason to look any further.
+        canSkipURAProcessing = !uraEnabled || computerData.HasRemoteInteractiveLogonRight()
+    )

-func FetchLocalGroupBitmapForComputer(tx graph.Transaction, computer graph.ID, suffix string) (cardinality.Duplex[uint64], error) {
-    if members, err := FetchLocalGroupMembership(tx, computer, suffix); err != nil {
-        if graph.IsErrNotFound(err) {
-            return cardinality.NewBitmap64(), nil
+    if citrixEnabled {
+        if computerData.DAUGroup == nil {
+            // "Direct Access Users" is a group that Citrix creates. If the group does not exist, then the computer does not have Citrix installed and post-processing logic can continue by enumerating the "Remote Desktop Users" AD group.
+            if canSkipURAProcessing {
+                return FetchRemoteDesktopUsersBitmapForComputerWithoutURA(computerData), nil
+            } else {
+                return FetchRemoteDesktopUsersBitmapForComputerWithURA(computerData)
+            }
         }
-        return nil, err
+        if !uraEnabled {
+            // In cases where we do not need to check for the existence of the RIL privilege, return the cross product of both groups
+            return CalculateCrossProductNodeSets(&computerData.LocalGroupData, []*graph.Node{computerData.RemoteDesktopUsersLocalGroup}, []*graph.Node{computerData.DAUGroup}), nil
+        } else {
+            // Otherwise, return the cross product of all three criteria
+            return CalculateCrossProductNodeSets(&computerData.LocalGroupData, []*graph.Node{computerData.RemoteDesktopUsersLocalGroup}, []*graph.Node{computerData.DAUGroup}, computerData.RemoteInteractiveLogonRightEntities.Slice()), nil
+        }
+    } else if canSkipURAProcessing {
+        return FetchRemoteDesktopUsersBitmapForComputerWithoutURA(computerData), nil
     } else {
-        return graph.NodeSetToDuplex(members), nil
+        return FetchRemoteDesktopUsersBitmapForComputerWithURA(computerData)
     }
 }
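// Illustrative sketch (not part of this diff): the branching in FetchCanRDPEntityBitmapForComputer
// above reduces to four outcomes. The pure function below restates that control flow so the cases
// are visible at a glance; the strategy names are hypothetical and the real function returns
// bitmaps rather than labels.
type rdpStrategy string

const (
	rdpDirectMembers         rdpStrategy = "first-degree Remote Desktop Users members"
	rdpMembersGatedByURA     rdpStrategy = "Remote Desktop Users members checked against RIL"
	rdpCitrixCrossProduct    rdpStrategy = "cross product of RDP and DAU groups"
	rdpCitrixCrossProductRIL rdpStrategy = "cross product of RDP, DAU and RIL entities"
)

// chooseRDPStrategy mirrors FetchCanRDPEntityBitmapForComputer: URA handling can be skipped when it
// is not enforced/collected, or when the RDP group itself holds RemoteInteractiveLogonRight.
func chooseRDPStrategy(citrixEnabled, dauGroupExists, uraEnabled, rdpGroupHasRIL bool) rdpStrategy {
	canSkipURAProcessing := !uraEnabled || rdpGroupHasRIL

	if citrixEnabled && dauGroupExists {
		if !uraEnabled {
			return rdpCitrixCrossProduct
		}
		return rdpCitrixCrossProductRIL
	}

	if canSkipURAProcessing {
		return rdpDirectMembers
	}

	return rdpMembersGatedByURA
}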
-func ExpandAllRDPLocalGroups(ctx context.Context, db graph.Database) (impact.PathAggregator, error) {
-    slog.InfoContext(ctx, "Expanding all AD group and local group memberships")
+// FetchComputersWithURA fetches all computers with the "hasura" property set to true and
+// aggregates the computer IDs into a bitmap.
+func FetchComputersWithURA(tx graph.Transaction) (cardinality.Duplex[uint64], error) {
+    defer measure.LogAndMeasure(slog.LevelInfo, "FetchComputersWithURA")()
-    return ResolveAllGroupMemberships(ctx, db, query.Not(
-        query.Or(
-            query.StringEndsWith(query.StartProperty(common.ObjectID.String()), wellknown.AdministratorsSIDSuffix.String()),
-            query.StringEndsWith(query.EndProperty(common.ObjectID.String()), wellknown.AdministratorsSIDSuffix.String()),
-        ),
-    ))
-}
+    nodesWithURA := cardinality.NewBitmap64()

-func FetchCanRDPEntityBitmapForComputer(tx graph.Transaction, computer graph.ID, localGroupExpansions impact.PathAggregator, enforceURA bool, citrixEnabled bool) (cardinality.Duplex[uint64], error) {
-    var (
-        uraEnabled    = enforceURA || ComputerHasURACollection(tx, computer)
-        rdpGroup, err = FetchComputerLocalGroupBySIDSuffix(tx, computer, wellknown.RemoteDesktopUsersSIDSuffix.String())
-    )
+    if err := tx.Nodes().Filter(
+        query.And(
+            query.Kind(query.Node(), ad.Computer),
+            query.Equals(query.NodeProperty(ad.HasURA.String()), true),
+        ),
+    ).Query(func(results graph.Result) error {
+        for results.Next() {
+            var (
+                nodeID        graph.ID
+                propertyValue bool
+            )
-    if err != nil {
-        if graph.IsErrNotFound(err) {
-            return cardinality.NewBitmap64(), nil
+            if err := results.Scan(&nodeID, &propertyValue); err != nil {
+                return err
+            } else if propertyValue {
+                nodesWithURA.Add(nodeID.Uint64())
+            }
         }
+
+        return results.Error()
+    }, query.Returning(
+        query.NodeID(),
+        query.NodeProperty(ad.HasURA.String()),
+    )); err != nil {
         return nil, err
     }

-    // Shortcut opportunity when citrix is disabled: see if the RDP group has RIL privilege. If it does, get the first degree members and return those ids, since everything in RDP group has CanRDP privs. No reason to look any further
-    canSkipURAProcessing := !uraEnabled || HasRemoteInteractiveLogonRight(tx, rdpGroup.ID, computer)
+    return nodesWithURA, nil
+}

-    if citrixEnabled {
-        if dauGroup, err := FetchComputerLocalGroupByName(tx, computer, "Direct Access Users"); err != nil {
-            // "Direct Access Users" is a group that Citrix creates. If the group does not exist, then the computer does not have Citrix installed and post-processing logic can continue by enumerating the "Remote Desktop Users" AD group.
-            if graph.IsErrNotFound(err) {
-                return FetchRemoteDesktopUsersBitmapForComputer(tx, computer, localGroupExpansions, rdpGroup, canSkipURAProcessing)
-            }
+// LocalGroupData contains data common to AD local group and domain group post-processing business logic. This allows
+// business logic to avoid database interactions.
+type LocalGroupData struct {
+    // All computer IDs in all domains
+    Computers cardinality.Duplex[uint64]
-            return nil, err
-        } else if !uraEnabled {
-            // In cases where we do not need to check for the existence of the RIL privilege, return the cross product of both groups
-            return CalculateCrossProductNodeSets(tx, localGroupExpansions, []*graph.Node{rdpGroup}, []*graph.Node{dauGroup}), nil
-        } else if baseRilEntities, err := FetchRemoteInteractiveLogonRightEntities(tx, computer); err != nil {
-            return nil, err
+    // All group IDs in all domains
+    Groups cardinality.Duplex[uint64]
+
+    // All edges where: (:Base)-[:MemberOf|MemberOfLocalGroup*..]->(:Group|LocalGroup)
+    GroupMembershipCache *algo.ReachabilityCache
+
+    // All edges where: (:Base)-[:MemberOfLocalGroup]->(:LocalGroup)
+    LocalGroupMembershipDigraph container.DirectedGraph
+
+    // Contains groups that we want to stop post-processed edge propagation at, for example: EVERYONE@DOMAIN.COM
+    ExcludedShortcutGroups cardinality.Duplex[uint64]
+}
+
+// FetchLocalGroupData accesses the given graph database and fetches all of the data required for LocalGroup post-processing.
+func FetchLocalGroupData(ctx context.Context, graphDB graph.Database) (*LocalGroupData, error) {
+    localGroupData := &LocalGroupData{}
+
+    if err := graphDB.ReadTransaction(ctx, func(tx graph.Transaction) error {
+        if excludedGroups, err := FetchAuthUsersAndEveryoneGroups(tx); err != nil {
+            return err
         } else {
-            // Otherwise, return the cross product of all three criteria
-            return CalculateCrossProductNodeSets(tx, localGroupExpansions, []*graph.Node{rdpGroup}, []*graph.Node{dauGroup}, baseRilEntities.Slice()), nil
+            localGroupData.ExcludedShortcutGroups = excludedGroups.IDBitmap()
+        }
+
+        if computerIDs, err := FetchNodeIDsByKind(tx, ad.Computer); err != nil {
+            return err
+        } else {
+            localGroupData.Computers = computerIDs
         }
+
+        if allGroupIDs, err := FetchNodeIDsByKind(tx, ad.Group); err != nil {
+            return err
+        } else {
+            localGroupData.Groups = allGroupIDs
+        }
+
+        return nil
+    }); err != nil {
+        return nil, err
+    }
+
+    if groupMembershipCache, err := algo.FetchFilteredReachabilityCache(ctx, graphDB, ad.MemberOf, ad.MemberOfLocalGroup); err != nil {
+        return nil, err
     } else {
-        // When the citrix flag is disabled, fall back to our original implementation
-        return FetchRemoteDesktopUsersBitmapForComputer(tx, computer, localGroupExpansions, rdpGroup, canSkipURAProcessing)
+        localGroupData.GroupMembershipCache = groupMembershipCache
     }

-func ComputerHasURACollection(tx graph.Transaction, computerID graph.ID) bool {
-    if computer, err := tx.Nodes().Filterf(func() graph.Criteria {
-        return query.Equals(query.NodeID(), computerID)
-    }).First(); err != nil {
-        return false
+    if localGroupMembershipDigraph, err := container.FetchFilteredDirectedGraph(ctx, graphDB, ad.MemberOfLocalGroup); err != nil {
+        return nil, err
     } else {
-        if ura, err := computer.Properties.Get(ad.HasURA.String()).Bool(); err != nil {
-            return false
+        localGroupData.LocalGroupMembershipDigraph = localGroupMembershipDigraph
+    }
+
+    return localGroupData, nil
+}
+
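// Illustrative sketch (not part of this diff): the point of LocalGroupData and FetchLocalGroupData
// above is to pull the shared group/computer/membership data out of the graph once per analysis run
// and hand the same snapshot to every post-processing pass. The pass type and runner below are
// hypothetical names used only to show that shape.
type localGroupPass func(data *LocalGroupData) error

// runLocalGroupPasses feeds one LocalGroupData snapshot to each pass in order, so no pass has to go
// back to the database for group expansions.
func runLocalGroupPasses(data *LocalGroupData, passes ...localGroupPass) error {
	for _, pass := range passes {
		if err := pass(data); err != nil {
			return err
		}
	}

	return nil
}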
+// FetchCanRDPData accesses the given graph database and fetches all of the required data for
+// CanRDP post-processing that is not unique to a single computer. This allows these data
+// elements to be shared between post-processing runs for each computer.
+func (s *LocalGroupData) FetchCanRDPData(ctx context.Context, graphDB graph.Database) (*CanRDPData, error) {
+    components := &CanRDPData{
+        LocalGroupData: *s,
+    }
+
+    if err := graphDB.ReadTransaction(ctx, func(tx graph.Transaction) error {
+        if computersWithURA, err := FetchComputersWithURA(tx); err != nil {
+            return err
         } else {
-            return ura
         }
+            components.ComputersWithURA = computersWithURA
         }
+
+        if excludedGroups, err := FetchAuthUsersAndEveryoneGroups(tx); err != nil {
+            return err
+        } else {
+            components.ExcludedShortcutGroups = excludedGroups.IDBitmap()
+        }
+
+        return nil
+    }); err != nil {
+        return components, err
     }
-}

-func FetchRemoteDesktopUsersBitmapForComputer(tx graph.Transaction, computer graph.ID, localGroupExpansions impact.PathAggregator, rdpGroup *graph.Node, skipURA bool) (cardinality.Duplex[uint64], error) {
-    if skipURA {
-        return FetchLocalGroupBitmapForComputer(tx, computer, wellknown.RemoteDesktopUsersSIDSuffix.String())
+    if remoteInteractiveLogonRightDigraph, err := container.FetchFilteredDirectedGraph(ctx, graphDB, ad.RemoteInteractiveLogonRight); err != nil {
+        return components, err
     } else {
-        return ProcessRDPWithUra(tx, rdpGroup, computer, localGroupExpansions)
+        components.RemoteInteractiveLogonRightDigraph = remoteInteractiveLogonRightDigraph
     }
+
+    return components, nil
 }

-func ProcessRDPWithUra(tx graph.Transaction, rdpLocalGroup *graph.Node, computer graph.ID, localGroupExpansions impact.PathAggregator) (cardinality.Duplex[uint64], error) {
-    rdpLocalGroupMembers := localGroupExpansions.Cardinality(rdpLocalGroup.ID.Uint64()).(cardinality.Duplex[uint64])
+// CanRDPData contains data common to CanRDP post-processing business logic. This allows
+// business logic to avoid database interactions.
+type CanRDPData struct {
+    LocalGroupData
+
+    // Duplex of computer IDs that have the "hasura" property set to true
+    ComputersWithURA cardinality.Duplex[uint64]
+
+    // All edges where: (:Base)-[:RemoteInteractiveLogonRight]->(:Computer)
+    RemoteInteractiveLogonRightDigraph container.DirectedGraph
+}
+
+// CanRDPComputerData contains data common to CanRDP post-processing business logic for a single computer. This allows
+// business logic to avoid database interactions.
+type CanRDPComputerData struct {
+    CanRDPData
+
+    // Computer ID being analyzed
+    Computer graph.ID
+
+    // The Citrix "Direct Access Users" group
+    DAUGroup *graph.Node
+
+    // The "Remote Desktop Users" LocalGroup attached to this computer
+    RemoteDesktopUsersLocalGroup *graph.Node
+
+    // Nodes that have a RemoteInteractiveLogonRight edge inbound to this computer
+    RemoteInteractiveLogonRightEntities graph.NodeSet
+}
+
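// Illustrative sketch (not part of this diff): LocalGroupData, CanRDPData and CanRDPComputerData
// layer their fields through struct embedding, so each level only adds what its narrower scope
// needs. The hypothetical stand-in types below show the same pattern in isolation.
type runScopedData struct {
	allGroupIDs []uint64 // analogous to LocalGroupData: shared by every pass in the run
}

type edgeScopedData struct {
	runScopedData             // embedded once, fetched once
	computersWithURA []uint64 // analogous to CanRDPData's CanRDP-specific caches
}

type nodeScopedData struct {
	edgeScopedData        // shared across every computer in the run
	computerID     uint64 // analogous to CanRDPComputerData.Computer
}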
+// FetchCanRDPComputerData uses the given transaction to fetch all of the computer-specific data required to compute the
+// given computer's inbound CanRDP edges.
+func (s *CanRDPData) FetchCanRDPComputerData(tx graph.Transaction, computer graph.ID) (*CanRDPComputerData, error) {
+    computerData := &CanRDPComputerData{
+        CanRDPData: *s,
+        Computer:   computer,
+    }
+
+    if dauGroup, err := FetchComputerLocalGroupByName(tx, computer, "Direct Access Users"); err != nil {
+        if !graph.IsErrNotFound(err) {
+            return nil, err
+        }
+    } else {
+        computerData.DAUGroup = dauGroup
+    }
-    if baseRilEntities, err := FetchRemoteInteractiveLogonRightEntities(tx, computer); err != nil {
+    if remoteDesktopUsersLocalGroup, err := FetchComputerLocalGroupBySIDSuffix(tx, computer, wellknown.RemoteDesktopUsersSIDSuffix.String()); err != nil {
         return nil, err
     } else {
-        var (
-            rdpEntities      = cardinality.NewBitmap64()
-            secondaryTargets = cardinality.NewBitmap64()
-        )
+        computerData.RemoteDesktopUsersLocalGroup = remoteDesktopUsersLocalGroup
+    }

-        // Attempt 1: look at each RIL entity directly and see if it has membership to the RDP group. If not, and it's a group, expand its membership for further processing
-        for _, entity := range baseRilEntities {
-            if rdpLocalGroupMembers.Contains(entity.ID.Uint64()) {
-                // If we have membership to the RDP group, then this is a valid CanRDP entity
-                rdpEntities.Add(entity.ID.Uint64())
-            } else if entity.Kinds.ContainsOneOf(ad.Group, ad.LocalGroup) {
-                secondaryTargets.Or(localGroupExpansions.Cardinality(entity.ID.Uint64()).(cardinality.Duplex[uint64]))
-            }
+    if rilEntities, err := FetchRemoteInteractiveLogonRightEntities(tx, computer); err != nil {
+        return nil, err
+    } else {
+        computerData.RemoteInteractiveLogonRightEntities = rilEntities
+    }
+
+    return computerData, nil
+}
+
+// HasRemoteInteractiveLogonRight reports whether the associated Remote Desktop Users local group has
+// a RemoteInteractiveLogonRight edge to the computer.
+func (s *CanRDPComputerData) HasRemoteInteractiveLogonRight() bool {
+    found := false
+
+    s.RemoteInteractiveLogonRightDigraph.EachAdjacentNode(s.RemoteDesktopUsersLocalGroup.ID.Uint64(), graph.DirectionOutbound, func(adjacent uint64) bool {
+        found = adjacent == s.Computer.Uint64()
+        return !found
+    })
+
+    return found
+}
+
+func FetchRemoteDesktopUsersBitmapForComputerWithURA(canRDPData *CanRDPComputerData) (cardinality.Duplex[uint64], error) {
+    var (
+        rdpLocalGroupMembers = canRDPData.GroupMembershipCache.ReachOfComponentContainingMember(canRDPData.RemoteDesktopUsersLocalGroup.ID.Uint64(), graph.DirectionInbound)
+        baseRILEntities      = container.AdjacentNodes(canRDPData.RemoteInteractiveLogonRightDigraph, canRDPData.Computer.Uint64(), graph.DirectionInbound)
+        rdpEntities          = cardinality.NewBitmap64()
+        secondaryTargetMaps  []cardinality.Duplex[uint64]
+    )
+
+    // Attempt 1: look at each RIL entity directly and see if it has membership to the RDP group.
+    // If not, expand its membership for further processing.
+    for _, entityID := range baseRILEntities {
+        if rdpLocalGroupMembers.Contains(entityID) {
+            // If we have membership to the RDP group, then this is a valid CanRDP entity
+            rdpEntities.Add(entityID)
+        } else {
+            secondaryTargetMaps = append(secondaryTargetMaps, canRDPData.GroupMembershipCache.ReachOfComponentContainingMember(entityID, graph.DirectionInbound))
         }
+    }

-        // Attempt 2: Look at each member of expanded groups and see if they have the correct permissions
-        for _, entity := range secondaryTargets.Slice() {
-            // If we have membership to the RDP group then this is a valid CanRDP entity
+    // Attempt 2: Look at each member of expanded groups and see if they have the correct permissions
+    for _, secondaryTargetMap := range secondaryTargetMaps {
+        // If we have membership to the RDP group then this is a valid CanRDP entity
+        secondaryTargetMap.Each(func(entity uint64) bool {
             if rdpLocalGroupMembers.Contains(entity) {
                 rdpEntities.Add(entity)
             }
-        }
-        return rdpEntities, nil
+            return true
+        })
     }
+
+    return rdpEntities, nil
 }
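// Illustrative sketch (not part of this diff): the two passes in
// FetchRemoteDesktopUsersBitmapForComputerWithURA amount to a membership intersection. Pass 1 keeps
// RIL entities already in the Remote Desktop Users reach set and queues the rest for expansion;
// pass 2 intersects each expansion with that same reach set. The map-based sets and the expand
// callback below are hypothetical stand-ins for the bitmap and reachability-cache types.
func rdpEntitiesWithURA(
	rdpGroupReach map[uint64]struct{}, // reach of the Remote Desktop Users local group
	rilEntities []uint64, // nodes holding RemoteInteractiveLogonRight on the computer
	expand func(entityID uint64) []uint64, // membership expansion for a non-member RIL entity
) map[uint64]struct{} {
	var (
		result     = make(map[uint64]struct{})
		expansions [][]uint64
	)

	// Pass 1: direct RIL entities that are already RDP group members qualify immediately.
	for _, entityID := range rilEntities {
		if _, ok := rdpGroupReach[entityID]; ok {
			result[entityID] = struct{}{}
		} else {
			expansions = append(expansions, expand(entityID))
		}
	}

	// Pass 2: expanded members qualify only if they are also in the RDP group's reach.
	for _, expansion := range expansions {
		for _, memberID := range expansion {
			if _, ok := rdpGroupReach[memberID]; ok {
				result[memberID] = struct{}{}
			}
		}
	}

	return result
}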